From cc0a61197fc1531fc1cb8b56e47383360b640abb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=A7=A6=E8=8B=B1=E6=9D=B0?= <327782001@qq.com>
Date: Sat, 13 May 2023 22:49:31 +0800
Subject: [PATCH] =?UTF-8?q?fix:=E6=B7=BB=E5=8A=A0markdown?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 REDEME.md                                     |  57 ++++
 doc/jvm.options                               |  78 ++++++
 doc/log4j2.properties                         | 101 +++++++
 doc/logstash-sample.conf                      |  17 ++
 doc/logstash.yml                              | 247 ++++++++++++++++++
 doc/logstash_metadata                         |   1 +
 doc/mysql.conf                                |  43 +++
 doc/pipelines.yml                             |  77 ++++++
 doc/startup.options                           |  53 ++++
 doc/test1.conf                                |  10 +
 doc/test2.conf                                |  11 +
 doc/test3.conf                                |  17 ++
 doc/test4.conf                                |  26 ++
 doc/test5.conf                                |  30 +++
 doc/xc_course_template.json                   |  95 +++++++
 .../java/es_02_index/TestIndex_08_close.java  |   3 +-
 16 files changed, 865 insertions(+), 1 deletion(-)
 create mode 100644 REDEME.md
 create mode 100644 doc/jvm.options
 create mode 100644 doc/log4j2.properties
 create mode 100644 doc/logstash-sample.conf
 create mode 100644 doc/logstash.yml
 create mode 100644 doc/logstash_metadata
 create mode 100644 doc/mysql.conf
 create mode 100644 doc/pipelines.yml
 create mode 100644 doc/startup.options
 create mode 100644 doc/test1.conf
 create mode 100644 doc/test2.conf
 create mode 100644 doc/test3.conf
 create mode 100644 doc/test4.conf
 create mode 100644 doc/test5.conf
 create mode 100644 doc/xc_course_template.json

diff --git a/REDEME.md b/REDEME.md
new file mode 100644
index 0000000..2369929
--- /dev/null
+++ b/REDEME.md
@@ -0,0 +1,57 @@
+## 1.doc
+
+The doc directory holds the Logstash-related files.
+
+### 1.mysql.conf
+
+Syncs MySQL data to ES.
+
+### 2.xc_course_template.json
+
+Maps the database table columns to the index fields.
+
+### 3.Testing grok syntax
+
+test1.conf
+
+test2.conf
+
+test3.conf
+
+test4.conf
+
+test5.conf
+
+## 2.java
+
+/Users/qinyingjie/Documents/idea-workspace/study/elasticsearch-demo/src/main
+
+A simple demo that uses Logstash to sync MySQL data to ES, then queries course information from ES and returns it to the front end.
+
+## 3.test
+
+/Users/qinyingjie/Documents/idea-workspace/study/elasticsearch-demo/src/test
+
+### 1.es_01_document
+
+Documents
+
+### 2.es_02_index
+
+Indexes
+
+### 3.es_03_search
+
+Search
+
+### 4.es_04_agg
+
+Aggregations
+
+### 5.es_05_jdbc
+
+jdbc-sql
+
+### 6.es_06_log
+
+Logging
diff --git a/doc/jvm.options b/doc/jvm.options
new file mode 100644
index 0000000..ac27467
--- /dev/null
+++ b/doc/jvm.options
@@ -0,0 +1,78 @@
+## JVM configuration
+
+# Xms represents the initial size of total heap space
+# Xmx represents the maximum size of total heap space
+
+-Xms1g
+-Xmx1g
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below this section are considered
+## expert settings. Don't tamper with them unless
+## you understand what you are doing
+##
+################################################################
+
+## GC configuration
+-XX:+UseConcMarkSweepGC
+-XX:CMSInitiatingOccupancyFraction=75
+-XX:+UseCMSInitiatingOccupancyOnly
+
+## Locale
+# Set the locale language
+#-Duser.language=en
+
+# Set the locale country
+#-Duser.country=US
+
+# Set the locale variant, if any
+#-Duser.variant=
+
+## basic
+
+# set the I/O temp directory
+#-Djava.io.tmpdir=$HOME
+
+# set to headless, just in case
+-Djava.awt.headless=true
+
+# ensure UTF-8 encoding by default (e.g.
filenames) +-Dfile.encoding=UTF-8 + +# use our provided JNA always versus the system one +#-Djna.nosys=true + +# Turn on JRuby invokedynamic +-Djruby.compile.invokedynamic=true +# Force Compilation +-Djruby.jit.threshold=0 +# Make sure joni regexp interruptability is enabled +-Djruby.regexp.interruptible=true + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps +# ensure the directory exists and has sufficient space +#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof + +## GC logging +#-XX:+PrintGCDetails +#-XX:+PrintGCTimeStamps +#-XX:+PrintGCDateStamps +#-XX:+PrintClassHistogram +#-XX:+PrintTenuringDistribution +#-XX:+PrintGCApplicationStoppedTime + +# log GC status to a file with time stamps +# ensure the directory exists +#-Xloggc:${LS_GC_LOG_FILE} + +# Entropy source for randomness +-Djava.security.egd=file:/dev/urandom diff --git a/doc/log4j2.properties b/doc/log4j2.properties new file mode 100644 index 0000000..a9eed0a --- /dev/null +++ b/doc/log4j2.properties @@ -0,0 +1,101 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +appender.rolling.type = RollingFile +appender.rolling.name = plain_rolling +appender.rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log +appender.rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %-.10000m%n +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 100MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.max = 30 + +appender.json_rolling.type = RollingFile +appender.json_rolling.name = json_rolling +appender.json_rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log +appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling.policies.type = Policies +appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling.policies.time.interval = 1 +appender.json_rolling.policies.time.modulate = true +appender.json_rolling.layout.type = JSONLayout +appender.json_rolling.layout.compact = true +appender.json_rolling.layout.eventEol = true +appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling.policies.size.size = 100MB +appender.json_rolling.strategy.type = DefaultRolloverStrategy +appender.json_rolling.strategy.max = 30 + +rootLogger.level = ${sys:ls.log.level} +rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console +rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling + +# Slowlog + +appender.console_slowlog.type = Console +appender.console_slowlog.name = 
plain_console_slowlog +appender.console_slowlog.layout.type = PatternLayout +appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.json_console_slowlog.type = Console +appender.json_console_slowlog.name = json_console_slowlog +appender.json_console_slowlog.layout.type = JSONLayout +appender.json_console_slowlog.layout.compact = true +appender.json_console_slowlog.layout.eventEol = true + +appender.rolling_slowlog.type = RollingFile +appender.rolling_slowlog.name = plain_rolling_slowlog +appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log +appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling_slowlog.policies.type = Policies +appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_slowlog.policies.time.interval = 1 +appender.rolling_slowlog.policies.time.modulate = true +appender.rolling_slowlog.layout.type = PatternLayout +appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_slowlog.policies.size.size = 100MB +appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy +appender.rolling_slowlog.strategy.max = 30 + +appender.json_rolling_slowlog.type = RollingFile +appender.json_rolling_slowlog.name = json_rolling_slowlog +appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log +appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling_slowlog.policies.type = Policies +appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling_slowlog.policies.time.interval = 1 +appender.json_rolling_slowlog.policies.time.modulate = true +appender.json_rolling_slowlog.layout.type = JSONLayout +appender.json_rolling_slowlog.layout.compact = true +appender.json_rolling_slowlog.layout.eventEol = true +appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling_slowlog.policies.size.size = 100MB +appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy +appender.json_rolling_slowlog.strategy.max = 30 + +logger.slowlog.name = slowlog +logger.slowlog.level = trace +logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog +logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog +logger.slowlog.additivity = false + +logger.licensereader.name = logstash.licensechecker.licensereader +logger.licensereader.level = error diff --git a/doc/logstash-sample.conf b/doc/logstash-sample.conf new file mode 100644 index 0000000..2fa9229 --- /dev/null +++ b/doc/logstash-sample.conf @@ -0,0 +1,17 @@ +# Sample Logstash configuration for creating a simple +# Beats -> Logstash -> Elasticsearch pipeline. 
+ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => ["http://localhost:9200"] + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" + #user => "elastic" + #password => "changeme" + } +} diff --git a/doc/logstash.yml b/doc/logstash.yml new file mode 100644 index 0000000..db8ca3c --- /dev/null +++ b/doc/logstash.yml @@ -0,0 +1,247 @@ +# Settings file in YAML +# +# Settings can be specified either in hierarchical form, e.g.: +# +# pipeline: +# batch: +# size: 125 +# delay: 5 +# +# Or as flat keys: +# +# pipeline.batch.size: 125 +# pipeline.batch.delay: 5 +# +# ------------ Node identity ------------ +# +# Use a descriptive name for the node: +# +# node.name: test +# +# If omitted the node name will default to the machine's host name +# +# ------------ Data path ------------------ +# +# Which directory should be used by logstash and its plugins +# for any persistent needs. Defaults to LOGSTASH_HOME/data +# +# path.data: +# +# ------------ Pipeline Settings -------------- +# +# The ID of the pipeline. +# +# pipeline.id: main +# +# Set the number of workers that will, in parallel, execute the filters+outputs +# stage of the pipeline. +# +# This defaults to the number of the host's CPU cores. +# +# pipeline.workers: 2 +# +# How many events to retrieve from inputs before sending to filters+workers +# +# pipeline.batch.size: 125 +# +# How long to wait in milliseconds while polling for the next event +# before dispatching an undersized batch to filters+outputs +# +# pipeline.batch.delay: 50 +# +# Force Logstash to exit during shutdown even if there are still inflight +# events in memory. By default, logstash will refuse to quit until all +# received events have been pushed to the outputs. +# +# WARNING: enabling this can lead to data loss during shutdown +# +# pipeline.unsafe_shutdown: false +# +# ------------ Pipeline Configuration Settings -------------- +# +# Where to fetch the pipeline configuration for the main pipeline +# +# path.config: +# +# Pipeline configuration string for the main pipeline +# +# config.string: +# +# At startup, test if the configuration is valid and exit (dry run) +# +# config.test_and_exit: false +# +# Periodically check if the configuration has changed and reload the pipeline +# This can also be triggered manually through the SIGHUP signal +# +# config.reload.automatic: false +# +# How often to check if the pipeline configuration has changed (in seconds) +# +# config.reload.interval: 3s +# +# Show fully compiled configuration as debug log message +# NOTE: --log.level must be 'debug' +# +# config.debug: false +# +# When enabled, process escaped characters such as \n and \" in strings in the +# pipeline configuration files. +# +# config.support_escapes: false +# +# ------------ Module Settings --------------- +# Define modules here. Modules definitions must be defined as an array. +# The simple way to see this is to prepend each `name` with a `-`, and keep +# all associated variables under the `name` they are associated with, and +# above the next, like this: +# +# modules: +# - name: MODULE_NAME +# var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE +# var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE +# var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE +# var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE +# +# Module variable names must be in the format of +# +# var.PLUGIN_TYPE.PLUGIN_NAME.KEY +# +# modules: +# +# ------------ Cloud Settings --------------- +# Define Elastic Cloud settings here. +# Format of cloud.id is a base64 value e.g. 
dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy +# and it may have an label prefix e.g. staging:dXMtZ... +# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host' +# cloud.id: +# +# Format of cloud.auth is: : +# This is optional +# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password' +# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password' +# cloud.auth: elastic: +# +# ------------ Queuing Settings -------------- +# +# Internal queuing model, "memory" for legacy in-memory based queuing and +# "persisted" for disk-based acked queueing. Defaults is memory +# +# queue.type: memory +# +# If using queue.type: persisted, the directory path where the data files will be stored. +# Default is path.data/queue +# +# path.queue: +# +# If using queue.type: persisted, the page data files size. The queue data consists of +# append-only data files separated into pages. Default is 64mb +# +# queue.page_capacity: 64mb +# +# If using queue.type: persisted, the maximum number of unread events in the queue. +# Default is 0 (unlimited) +# +# queue.max_events: 0 +# +# If using queue.type: persisted, the total capacity of the queue in number of bytes. +# If you would like more unacked events to be buffered in Logstash, you can increase the +# capacity using this setting. Please make sure your disk drive has capacity greater than +# the size specified here. If both max_bytes and max_events are specified, Logstash will pick +# whichever criteria is reached first +# Default is 1024mb or 1gb +# +# queue.max_bytes: 1024mb +# +# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint +# Default is 1024, 0 for unlimited +# +# queue.checkpoint.acks: 1024 +# +# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint +# Default is 1024, 0 for unlimited +# +# queue.checkpoint.writes: 1024 +# +# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page +# Default is 1000, 0 for no periodic checkpoint. +# +# queue.checkpoint.interval: 1000 +# +# ------------ Dead-Letter Queue Settings -------------- +# Flag to turn on dead-letter queue. +# +# dead_letter_queue.enable: false + +# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries +# will be dropped if they would increase the size of the dead letter queue beyond this setting. +# Default is 1024mb +# dead_letter_queue.max_bytes: 1024mb + +# If using dead_letter_queue.enable: true, the directory path where the data files will be stored. +# Default is path.data/dead_letter_queue +# +# path.dead_letter_queue: +# +# ------------ Metrics Settings -------------- +# +# Bind address for the metrics REST endpoint +# +# http.host: "127.0.0.1" +# +# Bind port for the metrics REST endpoint, this option also accept a range +# (9600-9700) and logstash will pick up the first available ports. 
+#
+# http.port: 9600-9700
+#
+# ------------ Debugging Settings --------------
+#
+# Options for log.level:
+#   * fatal
+#   * error
+#   * warn
+#   * info (default)
+#   * debug
+#   * trace
+#
+# log.level: info
+# path.logs:
+#
+# ------------ Other Settings --------------
+#
+# Where to find custom plugins
+# path.plugins: []
+#
+# ------------ X-Pack Settings (not applicable for OSS build)--------------
+#
+# X-Pack Monitoring
+# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
+#xpack.monitoring.enabled: false
+#xpack.monitoring.elasticsearch.username: logstash_system
+#xpack.monitoring.elasticsearch.password: password
+#xpack.monitoring.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
+#xpack.monitoring.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
+#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
+#xpack.monitoring.elasticsearch.ssl.truststore.password: password
+#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
+#xpack.monitoring.elasticsearch.ssl.keystore.password: password
+#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
+#xpack.monitoring.elasticsearch.sniffing: false
+#xpack.monitoring.collection.interval: 10s
+#xpack.monitoring.collection.pipeline.details.enabled: true
+#
+# X-Pack Management
+# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
+#xpack.management.enabled: false
+#xpack.management.pipeline.id: ["main", "apache_logs"]
+#xpack.management.elasticsearch.username: logstash_admin_user
+#xpack.management.elasticsearch.password: password
+#xpack.management.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
+#xpack.management.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
+#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
+#xpack.management.elasticsearch.ssl.truststore.password: password
+#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
+#xpack.management.elasticsearch.ssl.keystore.password: password
+#xpack.management.elasticsearch.ssl.verification_mode: certificate
+#xpack.management.elasticsearch.sniffing: false
+#xpack.management.logstash.poll_interval: 5s
diff --git a/doc/logstash_metadata b/doc/logstash_metadata
new file mode 100644
index 0000000..aea35bf
--- /dev/null
+++ b/doc/logstash_metadata
@@ -0,0 +1 @@
+--- 2019-11-09 18:26:00.192000000 Z
diff --git a/doc/mysql.conf b/doc/mysql.conf
new file mode 100644
index 0000000..4cca197
--- /dev/null
+++ b/doc/mysql.conf
@@ -0,0 +1,43 @@
+input {
+  stdin {
+  }
+  jdbc {
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/xc_course?useUnicode=true&characterEncoding=utf-8&useSSL=true&serverTimezone=UTC"
+    # the user we wish to execute our statement as
+    jdbc_user => "root"
+    jdbc_password => "root"
+    # the path to our downloaded jdbc driver
+    jdbc_driver_library => "D:/develop/maven/repository3/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar"
+    # the name of the driver class for mysql
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_paging_enabled => "true"
+    jdbc_page_size => "50000"
+    # SQL file to execute (alternative to the inline statement)
+    #statement_filepath => "/conf/course.sql"
+    statement => "select * from course_pub where timestamp > date_add(:sql_last_value,INTERVAL 8 HOUR)"
+    # schedule (cron syntax)
+    schedule => "* * * * *"
+    record_last_run => true
+    last_run_metadata_path => "D:/ES/logstash-7.3.0/config/logstash_metadata"
+  }
+}
+
+
+output {
+  elasticsearch {
+    # ES host and port
+    hosts => "localhost:9200"
+    #hosts => ["localhost:9200"]
+    # ES index name
index => "xc_course" + document_id => "%{id}" + document_type => "_doc" + template =>"D:/ES/logstash-7.3.0/config/xc_course_template.json" + template_name =>"xc_course" + template_overwrite =>"true" + } + stdout { + #日志输出 + codec => json_lines + } +} diff --git a/doc/pipelines.yml b/doc/pipelines.yml new file mode 100644 index 0000000..5ea6964 --- /dev/null +++ b/doc/pipelines.yml @@ -0,0 +1,77 @@ +# List of pipelines to be loaded by Logstash +# +# This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings. +# Default values for omitted settings are read from the `logstash.yml` file. +# When declaring multiple pipelines, each MUST have its own `pipeline.id`. +# +# Example of two pipelines: +# +# - pipeline.id: test +# pipeline.workers: 1 +# pipeline.batch.size: 1 +# config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }" +# - pipeline.id: another_test +# queue.type: persisted +# path.config: "/tmp/logstash/*.config" +# +# Available options: +# +# # name of the pipeline +# pipeline.id: mylogs +# +# # The configuration string to be used by this pipeline +# config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }" +# +# # The path from where to read the configuration text +# path.config: "/etc/conf.d/logstash/myconfig.cfg" +# +# # How many worker threads execute the Filters+Outputs stage of the pipeline +# pipeline.workers: 1 (actually defaults to number of CPUs) +# +# # How many events to retrieve from inputs before sending to filters+workers +# pipeline.batch.size: 125 +# +# # How long to wait in milliseconds while polling for the next event +# # before dispatching an undersized batch to filters+outputs +# pipeline.batch.delay: 50 +# +# # Internal queuing model, "memory" for legacy in-memory based queuing and +# # "persisted" for disk-based acked queueing. Defaults is memory +# queue.type: memory +# +# # If using queue.type: persisted, the page data files size. The queue data consists of +# # append-only data files separated into pages. Default is 64mb +# queue.page_capacity: 64mb +# +# # If using queue.type: persisted, the maximum number of unread events in the queue. +# # Default is 0 (unlimited) +# queue.max_events: 0 +# +# # If using queue.type: persisted, the total capacity of the queue in number of bytes. +# # Default is 1024mb or 1gb +# queue.max_bytes: 1024mb +# +# # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint +# # Default is 1024, 0 for unlimited +# queue.checkpoint.acks: 1024 +# +# # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint +# # Default is 1024, 0 for unlimited +# queue.checkpoint.writes: 1024 +# +# # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page +# # Default is 1000, 0 for no periodic checkpoint. +# queue.checkpoint.interval: 1000 +# +# # Enable Dead Letter Queueing for this pipeline. +# dead_letter_queue.enable: false +# +# If using dead_letter_queue.enable: true, the maximum size of dead letter queue for this pipeline. Entries +# will be dropped if they would increase the size of the dead letter queue beyond this setting. +# Default is 1024mb +# dead_letter_queue.max_bytes: 1024mb +# +# If using dead_letter_queue.enable: true, the directory path where the data files will be stored. 
+# Default is path.data/dead_letter_queue +# +# path.dead_letter_queue: diff --git a/doc/startup.options b/doc/startup.options new file mode 100644 index 0000000..3829fdb --- /dev/null +++ b/doc/startup.options @@ -0,0 +1,53 @@ +################################################################################ +# These settings are ONLY used by $LS_HOME/bin/system-install to create a custom +# startup script for Logstash and is not used by Logstash itself. It should +# automagically use the init system (systemd, upstart, sysv, etc.) that your +# Linux distribution uses. +# +# After changing anything here, you need to re-run $LS_HOME/bin/system-install +# as root to push the changes to the init script. +################################################################################ + +# Override Java location +#JAVACMD=/usr/bin/java + +# Set a home directory +LS_HOME=/usr/share/logstash + +# logstash settings directory, the path which contains logstash.yml +LS_SETTINGS_DIR=/etc/logstash + +# Arguments to pass to logstash +LS_OPTS="--path.settings ${LS_SETTINGS_DIR}" + +# Arguments to pass to java +LS_JAVA_OPTS="" + +# pidfiles aren't used the same way for upstart and systemd; this is for sysv users. +LS_PIDFILE=/var/run/logstash.pid + +# user and group id to be invoked as +LS_USER=logstash +LS_GROUP=logstash + +# Enable GC logging by uncommenting the appropriate lines in the GC logging +# section in jvm.options +LS_GC_LOG_FILE=/var/log/logstash/gc.log + +# Open file limit +LS_OPEN_FILES=16384 + +# Nice level +LS_NICE=19 + +# Change these to have the init script named and described differently +# This is useful when running multiple instances of Logstash on the same +# physical box or vm +SERVICE_NAME="logstash" +SERVICE_DESCRIPTION="logstash" + +# If you need to run a command or script before launching Logstash, put it +# between the lines beginning with `read` and `EOM`, and uncomment those lines. 
+### +## read -r -d '' PRESTART << EOM +## EOM diff --git a/doc/test1.conf b/doc/test1.conf new file mode 100644 index 0000000..72a32a0 --- /dev/null +++ b/doc/test1.conf @@ -0,0 +1,10 @@ +input { + stdin { + } +} + +output { + stdout { + codec=>rubydebug + } +} \ No newline at end of file diff --git a/doc/test2.conf b/doc/test2.conf new file mode 100644 index 0000000..c550fd4 --- /dev/null +++ b/doc/test2.conf @@ -0,0 +1,11 @@ +input { + file { + path => ["D:/ES/logstash-7.3.0/nginx.log"] + start_position => "beginning" + } +} +output { + stdout { + codec=>rubydebug + } +} \ No newline at end of file diff --git a/doc/test3.conf b/doc/test3.conf new file mode 100644 index 0000000..8a6069c --- /dev/null +++ b/doc/test3.conf @@ -0,0 +1,17 @@ +input { + stdin { + } +} + +filter{ + grok{ + match => ["message","%{IP:clientip}\ \[%{HTTPDATE:timestamp}\]\ %{QS:referrer}\ %{NUMBER:response}\ %{NUMBER:bytes}"] + } +} + + +output { + stdout { + codec=>rubydebug + } +} \ No newline at end of file diff --git a/doc/test4.conf b/doc/test4.conf new file mode 100644 index 0000000..1278291 --- /dev/null +++ b/doc/test4.conf @@ -0,0 +1,26 @@ +input { + stdin {} +} + +filter { + grok { + match => { "message" => "%{IP:clientip}\ \[%{HTTPDATE:timestamp}\]\ %{QS:referrer}\ %{NUMBER:response}\ %{NUMBER:bytes}" } + remove_field => [ "message" ] + } + date { + match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"] + } + mutate { + rename => { "response" => "response_new" } + convert => [ "response","float" ] + gsub => ["referrer","\"",""] + remove_field => ["timestamp"] + split => ["clientip", "."] + } +} + +output { + stdout { + codec => "rubydebug" + } +} \ No newline at end of file diff --git a/doc/test5.conf b/doc/test5.conf new file mode 100644 index 0000000..c31c3b9 --- /dev/null +++ b/doc/test5.conf @@ -0,0 +1,30 @@ +input { + file { + path => ["D:/ES/logstash-7.3.0/nginx.log"] + start_position => "beginning" + } +} + +filter { + grok { + match => { "message" => "%{IP:clientip}\ \[%{HTTPDATE:timestamp}\]\ %{QS:referrer}\ %{NUMBER:response}\ %{NUMBER:bytes}" } + remove_field => [ "message" ] + } + date { + match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"] + } + mutate { + rename => { "response" => "response_new" } + convert => [ "response","float" ] + gsub => ["referrer","\"",""] + remove_field => ["timestamp"] + split => ["clientip", "."] + } +} + +output { + elasticsearch { + hosts => ["127.0.0.1:9200"] + index => "logstash-%{+YYYY.MM.dd}" + } +} \ No newline at end of file diff --git a/doc/xc_course_template.json b/doc/xc_course_template.json new file mode 100644 index 0000000..535121d --- /dev/null +++ b/doc/xc_course_template.json @@ -0,0 +1,95 @@ +{ + "mappings" : { + "_doc" : { + "_source" : { + "excludes" : [ "teachplan", "description", "content" ] + }, + "properties" : { + + "content" : { + "analyzer" : "ik_max_word", + "type" : "text" + }, + "description" : { + "copy_to" : [ "content" ], + "type" : "text" + }, + "grade" : { + "type" : "keyword" + }, + "id" : { + "type" : "keyword" + }, + "mt" : { + "type" : "keyword" + }, + "name" : { + "type" : "text", + "analyzer" : "ik_max_word" + }, + "users" : { + "index" : false, + "type" : "text" + }, + "charge" : { + "type" : "keyword" + }, + "valid" : { + "type" : "keyword" + }, + "pic" : { + "index" : false, + "type" : "keyword" + }, + "qq" : { + "index" : false, + "type" : "keyword" + }, + "price" : { + "type" : "float" + }, + "price_old" : { + "type" : "float" + }, + "st" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "studymodel" : { 
+ "type" : "keyword" + }, + "teachmode" : { + "type" : "keyword" + }, + "teachplan" : { + "copy_to" : [ "content" ], + "type" : "text" + }, + + "expires" : { + "type" : "date", + "format": "yyyy-MM-dd HH:mm:ss" + }, + "pub_time" : { + "type" : "date", + "format": "yyyy-MM-dd HH:mm:ss" + }, + "start_time" : { + "type" : "date", + "format": "yyyy-MM-dd HH:mm:ss" + }, + "end_time" : { + "type" : "date", + "format": "yyyy-MM-dd HH:mm:ss" + } + } + } + }, + "settings" : { + "number_of_shards" : "1" , + "number_of_replicas" : "0" + }, + "template" : "xc_course" +} diff --git a/src/test/java/es_02_index/TestIndex_08_close.java b/src/test/java/es_02_index/TestIndex_08_close.java index d8f6381..3435064 100644 --- a/src/test/java/es_02_index/TestIndex_08_close.java +++ b/src/test/java/es_02_index/TestIndex_08_close.java @@ -1,9 +1,10 @@ package es_02_index; -import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; + import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.CloseIndexRequest; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; -- GitLab
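
A note on the only source change in this patch, TestIndex_08_close.java: the test now imports the high-level REST client's own request class, org.elasticsearch.client.indices.CloseIndexRequest, instead of the transport-level org.elasticsearch.action.admin.indices.close.CloseIndexRequest. Below is a minimal sketch of how that request is typically used with RestHighLevelClient; the index name "xc_course" and the client wiring are illustrative assumptions, not taken from the test itself.

import java.io.IOException;

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CloseIndexRequest;

public class CloseIndexSketch {
    // Closes an index via the 7.x high-level REST client (sketch, not the actual test code).
    public static void closeIndex(RestHighLevelClient client) throws IOException {
        CloseIndexRequest request = new CloseIndexRequest("xc_course"); // assumed index name
        AcknowledgedResponse response = client.indices().close(request, RequestOptions.DEFAULT);
        System.out.println("close acknowledged: " + response.isAcknowledged());
    }
}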
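
The README describes the end-to-end flow: mysql.conf lets Logstash sync the course_pub table into the xc_course index, and the Java side then queries course information from ES and returns it to the front end. A minimal sketch of that query step, in the same RestHighLevelClient style as the tests, follows; the search keyword and the printing are made-up examples, and only the index name and the "name" field (analyzed with ik_max_word) come from mysql.conf and xc_course_template.json.

import java.io.IOException;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class CourseSearchSketch {
    // Full-text search against the index that Logstash keeps in sync (sketch only).
    public static void searchCourses(RestHighLevelClient client) throws IOException {
        SearchRequest request = new SearchRequest("xc_course");   // index written by mysql.conf
        SearchSourceBuilder source = new SearchSourceBuilder();
        source.query(QueryBuilders.matchQuery("name", "java"));   // "java" is a sample keyword
        request.source(source);
        SearchResponse response = client.search(request, RequestOptions.DEFAULT);
        for (SearchHit hit : response.getHits().getHits()) {
            System.out.println(hit.getSourceAsString());          // course document as JSON
        }
    }
}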
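
On the grok test pipelines: test3.conf and test4.conf read from stdin, test5.conf tails nginx.log, and all three use the pattern %{IP:clientip}\ \[%{HTTPDATE:timestamp}\]\ %{QS:referrer}\ %{NUMBER:response}\ %{NUMBER:bytes}, which targets access-log-style lines. A hypothetical input line such as 192.168.0.1 [07/Feb/2023:16:24:19 +0800] "GET /index.html HTTP/1.1" 200 5039 would be split into clientip, timestamp, referrer, response and bytes fields; test4.conf and test5.conf then drop the raw message, parse timestamp with the date filter, and apply the mutate operations shown (rename, convert, gsub, remove_field, split) before printing to stdout or indexing into logstash-%{+YYYY.MM.dd}.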