提交 c44f294a 编写于 作者: 小傅哥's avatar 小傅哥

feat:canal 分库分表数据同步

上级
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
### IntelliJ IDEA ###
.idea/modules.xml
.idea/jarRepositories.xml
.idea/compiler.xml
.idea/libraries/
*.iws
*.iml
*.ipr
### Eclipse ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/
### Mac OS ###
.DS_Store
/xfg-dev-tech-app/data/
# 通过MySQL binlog日志,使用canal同步分库分表数据,到 Elasticsearch
```java
PUT xfg_dev_tech.user_order
{
"mappings": {
"properties": {
"_user_id":{"type": "text"},
"_user_name":{"type": "text"},
"_order_id":{"type": "text"},
"_uuid":{"type": "text"},
"_create_time":{"type": "date"},
"_update_time":{"type": "date"}
}
}
}
```
```java
PUT /xfg_dev_tech.user_order/_mapping
{
"properties": {
"_sku_name": {
"type": "text"
}
}
}
```
mysql -uroot -p
show master status // binlog日志文件
reset master; // 重置 binlog 日志(会清空全部 binlog,谨慎使用)
\ No newline at end of file
server:
port: 8081
spring:
jackson:
date-format: yyyy-MM-dd HH:mm:ss
time-zone: GMT+8
default-property-inclusion: non_null
canal.conf:
mode: tcp #tcp kafka rocketMQ rabbitMQ
flatMessage: true
zookeeperHosts:
syncBatchSize: 1000
retries: -1
timeout:
accessKey:
secretKey:
consumerProperties:
canal.tcp.server.host: canal-server:11111
canal.tcp.zookeeper.hosts:
canal.tcp.batch.size: 500
canal.tcp.username:
canal.tcp.password:
# 需要同步的数据库配置
srcDataSources:
xfg_dev_tech_db_00:
url: jdbc:mysql://mysql:3306/xfg_dev_tech_db_00?useUnicode=true
username: canal
password: canal
xfg_dev_tech_db_01:
url: jdbc:mysql://mysql:3306/xfg_dev_tech_db_01?useUnicode=true
username: canal
password: canal
canalAdapters:
- instance: example # canal instance Name or mq topic name
groups:
- groupId: g1
outerAdapters:
- name: logger
- name: es7
hosts: elasticsearch:9200 # 127.0.0.1:9200 for rest mode
properties:
mode: rest # transport or rest
cluster.name: docker-cluster
dataSourceKey: xfg_dev_tech_db_00
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_0 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_00
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_1 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_00
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_2 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_00
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_3 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_01
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_0 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_01
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_1 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_01
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_2 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
dataSourceKey: xfg_dev_tech_db_01
destination: example
groupId: g1
esMapping:
_index: xfg_dev_tech.user_order
_id: _id
sql: "select t.uuid as _id,
t.user_id as _user_id,
t.user_name as _user_name,
t.sku_name as _sku_name,
t.order_id as _order_id,
t.uuid as _uuid,
t.create_time as _create_time,
t.update_time as _update_time
from user_order_3 t"
etlCondition: "where t.update_time>={}"
commitBatch: 3000
#################################################
## mysql serverId , v1.0.26+ will autoGen
canal.instance.mysql.slaveId=3
# enable gtid use true/false
canal.instance.gtidon=false
# position info
#canal.instance.master.address=10.253.11.216:3306
canal.instance.master.address=mysql:3306
canal.instance.master.journal.name=
canal.instance.master.position=
canal.instance.master.timestamp=
canal.instance.master.gtid=
# rds oss binlog
canal.instance.rds.accesskey=
canal.instance.rds.secretkey=
canal.instance.rds.instanceId=
# table meta tsdb info
canal.instance.tsdb.enable=true
#canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
#canal.instance.tsdb.dbUsername=canal
#canal.instance.tsdb.dbPassword=canal
#canal.instance.standby.address =
#canal.instance.standby.journal.name =
#canal.instance.standby.position =
#canal.instance.standby.timestamp =
#canal.instance.standby.gtid=
# username/password
canal.instance.dbUsername=canal
canal.instance.dbPassword=canal
canal.instance.connectionCharset = UTF-8
# enable druid Decrypt database password
canal.instance.enableDruid=false
#canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==
# table regex
canal.instance.filter.regex=.*\\..*
# table black regex
canal.instance.filter.black.regex=mysql\\.slave_.*
# table field filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
#canal.instance.filter.field=test1.t_product:id/subject/keywords,test2.t_company:id/name/contact/ch
# table field black filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
#canal.instance.filter.black.field=test1.t_product:subject/product_image,test2.t_company:id/name/contact/ch
# mq config
canal.mq.topic=example
# dynamic topic route by schema or table regex
#canal.mq.dynamicTopic=mytest1.user,topic2:mytest2\\..*,.*\\..*
canal.mq.partition=0
# hash partition config
#canal.mq.enableDynamicQueuePartition=false
#canal.mq.partitionsNum=3
#canal.mq.dynamicTopicPartitionNum=test.*:4,mycanal:6
#canal.mq.partitionHash=test.table:id^name,.*\\..*
#
# multi stream for polardbx
canal.instance.multi.stream.on=false
#################################################
curl -X PUT "127.0.0.1:9200/xfg_dev_tech.user_order" -H 'Content-Type: application/json' -d'
{
"mappings": {
"properties": {
"_user_id":{"type": "text"},
"_user_name":{"type": "text"},
"_order_id":{"type": "text"},
"_uuid":{"type": "text"},
"_create_time":{"type": "date"},
"_update_time":{"type": "date"}
}
}
}'
\ No newline at end of file
curl -X PUT "127.0.0.1:9200/xfg_dev_tech.user_order/_mapping" -H 'Content-Type: application/json' -d'
{
"properties": {
"_sku_name": {
"type": "text"
}
}
}'
\ No newline at end of file
curl -X DELETE "127.0.0.1:9200/xfg_dev_tech.user_order"
\ No newline at end of file
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.host: "0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
\ No newline at end of file
input {
tcp {
mode => "server"
host => "0.0.0.0"
port => 4560
codec => json_lines
type => "info"
}
}
filter {}
output {
elasticsearch {
action => "index"
hosts => "es:9200"
index => "springboot-logstash-%{+YYYY.MM.dd}"
}
}
[client]
port = 3306
default-character-set = utf8mb4
[mysqld]
user = mysql
port = 3306
sql_mode = NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
default-storage-engine = InnoDB
default-authentication-plugin = mysql_native_password
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
init_connect = 'SET NAMES utf8mb4'
log-bin = mysql-bin
binlog-format = row
server-id = 1
binlog-do-db = xfg_dev_tech_db_00
binlog-do-db = xfg_dev_tech_db_01
slow_query_log
#long_query_time = 3
slow-query-log-file = /var/log/mysql/mysql.slow.log
log-error = /var/log/mysql/mysql.error.log
default-time-zone = '+8:00'
[mysql]
default-character-set = utf8mb4
CREATE USER 'canal' IDENTIFIED BY 'canal';
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
FLUSH PRIVILEGES;
\ No newline at end of file
此差异已折叠。
此差异已折叠。
# 命令执行 docker-compose -f xfg-dev-tech-canal-docker-compose.yml up -d
version: '3.9'
services:
mysql:
image: mysql:8.0.32
container_name: mysql
command: --default-authentication-plugin=mysql_native_password
restart: always
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: 123456
depends_on:
- mysql-job-dbdata
ports:
- "13306:3306"
volumes:
- ./mysql/my.cnf:/etc/mysql/conf.d/mysql.cnf:ro
- ./mysql/sql:/docker-entrypoint-initdb.d
volumes_from:
- mysql-job-dbdata
networks:
- default
# 自动加载数据
mysql-job-dbdata:
image: alpine:3.18.2
container_name: mysql-job-dbdata
volumes:
- /var/lib/mysql
# phpmyadmin https://hub.docker.com/_/phpmyadmin
phpmyadmin:
image: phpmyadmin:5.2.1
container_name: phpmyadmin
hostname: phpmyadmin
ports:
- 8899:80
environment:
- PMA_HOST=mysql
- PMA_PORT=3306
- MYSQL_ROOT_PASSWORD=123456
    depends_on:
      mysql:
        # mysql 服务未定义 healthcheck,service_healthy 永远不会满足,改为 service_started
        condition: service_started
networks:
- default
# canal-server 伪装成mysql的从库
# docker cp canal-server:/home/admin/canal-server/conf/example/instance.properties /Users/xiaofuge/Documents/develop/github/xfg-studio/xfg-dev-tech-canal/docs/dev-ops/canal
canal-server:
image: canal/canal-server:v1.1.7
container_name: canal-server
restart: unless-stopped
ports:
- "11111:11111"
volumes:
- ./canal/instance.properties:/home/admin/canal-server/conf/example/instance.properties
depends_on:
- mysql
networks:
- default
# canal-adapter 负责数据分发
# docker cp canal-adapter:/opt/canal-adapter/conf/application.yml /Users/xiaofuge/Documents/develop/github/xfg-studio/xfg-dev-tech-canal/docs/dev-ops/canal-adapter
# docker cp canal-adapter:/opt/canal-adapter/conf/rdb/mytest_user.yml /Users/xiaofuge/Documents/develop/github/xfg-studio/xfg-dev-tech-canal/docs/dev-ops/canal-adapter/rdb
canal-adapter:
image: coopersoft/canal-adapter:v1.1.6
container_name: canal-adapter
restart: unless-stopped
ports:
- "8082:8081"
volumes:
- ./canal-adapter/application.yml:/opt/canal-adapter/conf/application.yml
- ./canal-adapter/es7:/opt/canal-adapter/conf/es7
depends_on:
- canal-server
networks:
- default
elasticsearch:
image: elasticsearch:7.17.14
ports:
- '9200:9200'
- '9300:9300'
container_name: elasticsearch
restart: always
    environment:
      - 'discovery.type=single-node' # 以单一节点模式启动
      - "cluster.name=docker-cluster" # 集群名称,需与 canal-adapter 配置中的 cluster.name 一致(原先重复定义了两个不同的 cluster.name)
      - 'ES_JAVA_OPTS=-Xms512m -Xmx512m' # 设置使用jvm内存大小
networks:
- default
kibana:
image: kibana:7.17.14
container_name: kibana
restart: always
volumes:
- /etc/localtime:/etc/localtime
- ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
ports:
- '5601:5601'
links:
- elasticsearch:es #可以用es这个域名访问elasticsearch服务
environment:
- ELASTICSEARCH_URL=http://elasticsearch:9200 #设置访问elasticsearch的地址
- 'elasticsearch.hosts=http://es:9200' #设置访问elasticsearch的地址
- I18N_LOCALE=zh-CN
networks:
- default
depends_on:
- elasticsearch
networks:
default:
driver: bridge
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>cn.bugstack</groupId>
<artifactId>xfg-dev-tech-canal</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<modules>
<module>xfg-dev-tech-app</module>
<module>xfg-dev-tech-infrastructure</module>
</modules>
    <properties>
        <java.version>1.8</java.version>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.7.12</version>
<relativePath/>
</parent>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
<version>2.1.4</version>
</dependency>
<!-- # 多数据源路由配置
# mysql 5.x driver-class-name: com.mysql.jdbc.Driver mysql-connector-java 5.1.34
# mysql 8.x driver-class-name: com.mysql.cj.jdbc.Driver mysql-connector-java 8.0.22-->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <!-- 8.x 驱动:sharding 配置使用 com.mysql.cj.jdbc.Driver,5.1.34 中不存在该类 -->
            <version>8.0.22</version>
        </dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>2.0.28</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.9</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.shardingsphere/shardingsphere-jdbc-core -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core</artifactId>
<version>5.4.1</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.elasticsearch.plugin/x-pack-sql-jdbc -->
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>x-pack-sql-jdbc</artifactId>
<version>7.17.14</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
        <finalName>xfg-dev-tech-canal</finalName>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.0</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>2.5</version>
<configuration>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<!-- 统一设定POM版本信息插件 -->
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>versions-maven-plugin</artifactId>
<version>2.7</version>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>dev</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<properties>
<profileActive>dev</profileActive>
</properties>
</profile>
<profile>
<id>test</id>
<properties>
<profileActive>test</profileActive>
</properties>
</profile>
<profile>
<id>prod</id>
<properties>
<profileActive>prod</profileActive>
</properties>
</profile>
</profiles>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>cn.bugstack</groupId>
<artifactId>xfg-dev-tech-canal</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>xfg-dev-tech-app</artifactId>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<exclusions>
<exclusion>
<artifactId>snakeyaml</artifactId>
<groupId>org.yaml</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
</dependency>
<!-- # 多数据源路由配置
# mysql 5.x driver-class-name: com.mysql.jdbc.Driver mysql-connector-java 5.1.34
# mysql 8.x driver-class-name: com.mysql.cj.jdbc.Driver mysql-connector-java 8.0.22-->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core</artifactId>
</dependency>
<!-- https://mvnrepository.com/artifact/org.yaml/snakeyaml -->
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
<version>1.33</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>1.9.4</version>
</dependency>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>x-pack-sql-jdbc</artifactId>
</dependency>
<!-- 自身模块 begin -->
<dependency>
<groupId>cn.bugstack</groupId>
<artifactId>xfg-dev-tech-infrastructure</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
<!-- 自身模块 end -->
</dependencies>
<build>
<finalName>xfg-dev-tech-app</finalName>
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
<includes>
<include>**/**</include>
</includes>
</resource>
</resources>
<testResources>
<testResource>
<directory>src/test/resources</directory>
<filtering>true</filtering>
<includes>
<include>**/**</include>
</includes>
</testResource>
</testResources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.6</version>
<configuration>
<skipTests>true</skipTests>
<testFailureIgnore>false</testFailureIgnore>
<includes>
<include>**/*Test.java</include>
</includes>
</configuration>
</plugin>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<mainClass>cn.bugstack.xfg.frame.Application</mainClass>
<layout>JAR</layout>
</configuration>
</plugin>
</plugins>
</build>
</project>
\ No newline at end of file
package cn.bugstack.xfg.dev.tech;
import org.springframework.beans.factory.annotation.Configurable;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
// NOTE(review): @Configurable is the AspectJ dependency-injection annotation; it was
// probably intended to be @Configuration (or nothing). Kept to avoid any behavior change.
@Configurable
public class Application {

    /**
     * Boots the Spring application context.
     *
     * @param args command-line arguments; forwarded to Spring so that
     *             external overrides (e.g. --server.port=...) take effect
     */
    public static void main(String[] args) {
        // Bug fix: args were previously dropped (SpringApplication.run(Application.class)),
        // which silently disabled command-line property overrides.
        SpringApplication.run(Application.class, args);
    }

}
package cn.bugstack.xfg.dev.tech.config;
import org.apache.ibatis.session.SqlSessionFactory;
import org.elasticsearch.xpack.sql.jdbc.EsDataSource;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import javax.sql.DataSource;
/**
 * Wires two independent MyBatis stacks side by side:
 * one over a sharded MySQL data source and one over the Elasticsearch SQL
 * (x-pack JDBC) driver. Each stack gets its own DataSource, SqlSessionFactory
 * and mapper scan so the two mapper packages never mix.
 */
@Configuration
public class DataSourceConfig {

    /** MyBatis wiring for the Elasticsearch side (dao.elasticsearch mappers). */
    @Configuration
    @MapperScan(basePackages = "cn.bugstack.xfg.dev.tech.infrastructure.dao.elasticsearch", sqlSessionFactoryRef = "elasticsearchSqlSessionFactory")
    static class ElasticsearchMyBatisConfig {

        /**
         * Elasticsearch JDBC data source. Connection settings (url, driverClassName)
         * are bound from {@code spring.elasticsearch.datasource.*}.
         * Renamed from {@code igniteDataSource} (copy-paste leftover); the bean name
         * is pinned by the annotation, so callers are unaffected.
         */
        @Bean("elasticsearchDataSource")
        @ConfigurationProperties(prefix = "spring.elasticsearch.datasource")
        public DataSource elasticsearchDataSource() {
            return new EsDataSource();
        }

        /**
         * SqlSessionFactory backed by the Elasticsearch data source; loads only
         * the elasticsearch mapper XMLs.
         */
        @Bean("elasticsearchSqlSessionFactory")
        public SqlSessionFactory elasticsearchSqlSessionFactory(DataSource elasticsearchDataSource) throws Exception {
            SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
            factoryBean.setDataSource(elasticsearchDataSource);
            factoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:/mybatis/mapper/elasticsearch/*.xml"));
            return factoryBean.getObject();
        }

    }

    /** MyBatis wiring for the MySQL side (dao.mysql mappers). */
    @Configuration
    @MapperScan(basePackages = "cn.bugstack.xfg.dev.tech.infrastructure.dao.mysql", sqlSessionFactoryRef = "mysqlSqlSessionFactory")
    static class MysqlMyBatisConfig {

        /**
         * MySQL (ShardingSphere) data source; url and driver are read explicitly
         * from the environment in addition to the property binding.
         */
        @Bean("mysqlDataSource")
        @ConfigurationProperties(prefix = "spring.mysql.datasource")
        public DataSource mysqlDataSource(Environment environment) {
            return DataSourceBuilder.create()
                    .url(environment.getProperty("spring.mysql.datasource.url"))
                    .driverClassName(environment.getProperty("spring.mysql.datasource.driver-class-name"))
                    .build();
        }

        /**
         * SqlSessionFactory backed by the MySQL data source; loads only the
         * mysql mapper XMLs.
         */
        @Bean("mysqlSqlSessionFactory")
        public SqlSessionFactory mysqlSqlSessionFactory(DataSource mysqlDataSource) throws Exception {
            SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
            factoryBean.setDataSource(mysqlDataSource);
            factoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:/mybatis/mapper/mysql/*.xml"));
            return factoryBean.getObject();
        }

    }

}
server:
port: 8090
spring:
mysql.datasource:
driver-class-name: org.apache.shardingsphere.driver.ShardingSphereDriver
url: jdbc:shardingsphere:classpath:sharding/sharding-jdbc-dev.yaml
elasticsearch.datasource:
driverClassName: org.elasticsearch.xpack.sql.jdbc.EsDriver
url: jdbc:es://http://127.0.0.1:9200
mybatis:
mapper-locations: classpath:/mybatis/mapper/*/*.xml
config-location: classpath:/mybatis/config/mybatis-config.xml
# 日志
logging:
level:
root: info
config: classpath:logback-spring.xml
\ No newline at end of file
server:
port: 8090
spring:
mysql.datasource:
driver-class-name: org.apache.shardingsphere.driver.ShardingSphereDriver
url: jdbc:shardingsphere:classpath:sharding/sharding-jdbc-dev.yaml
elasticsearch.datasource:
driverClassName: org.elasticsearch.xpack.sql.jdbc.EsDriver
url: jdbc:es://http://127.0.0.1:9200
mybatis:
mapper-locations: classpath:/mybatis/mapper/*/*.xml
config-location: classpath:/mybatis/config/mybatis-config.xml
# 日志
logging:
level:
root: info
config: classpath:logback-spring.xml
\ No newline at end of file
server:
port: 8090
spring:
mysql.datasource:
driver-class-name: org.apache.shardingsphere.driver.ShardingSphereDriver
url: jdbc:shardingsphere:classpath:sharding/sharding-jdbc-dev.yaml
elasticsearch.datasource:
driverClassName: org.elasticsearch.xpack.sql.jdbc.EsDriver
url: jdbc:es://http://127.0.0.1:9200
mybatis:
mapper-locations: classpath:/mybatis/mapper/*/*.xml
config-location: classpath:/mybatis/config/mybatis-config.xml
# 日志
logging:
level:
root: info
config: classpath:logback-spring.xml
\ No newline at end of file
spring:
config:
name: xfg-dev-tech-canal
profiles:
active: dev
<?xml version="1.0" encoding="UTF-8"?>
<!-- 日志级别从低到高分为TRACE < DEBUG < INFO < WARN < ERROR < FATAL,如果设置为WARN,则低于WARN的信息都不会输出 -->
<configuration scan="true" scanPeriod="10 seconds">
<contextName>logback</contextName>
<!-- name的值是变量的名称,value的值时变量定义的值。通过定义的值会被插入到logger上下文中。定义变量后,可以使“${}”来使用变量。 -->
<springProperty scope="context" name="log.path" source="logging.path"/>
<!-- 日志格式 -->
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
<conversionRule conversionWord="wex"
converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
<conversionRule conversionWord="wEx"
converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>
<!-- 输出到控制台 -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<!-- 此日志appender是为开发使用,只配置最底级别,控制台输出的日志级别是大于或等于此级别的日志信息 -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>info</level>
</filter>
<encoder>
<pattern>%d{yy-MM-dd.HH:mm:ss.SSS} [%-16t] %-5p %-22c{0}%X{ServiceId} -%X{trace-id} %m%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!--输出到文件-->
<!-- 时间滚动输出 level为 INFO 日志 -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文件的路径及文件名 -->
<file>./data/log/log_info.log</file>
<!--日志文件输出格式-->
<encoder>
<pattern>%d{yy-MM-dd.HH:mm:ss.SSS} [%-16t] %-5p %-22c{0}%X{ServiceId} -%X{trace-id} %m%n</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 每天日志归档路径以及格式 -->
<fileNamePattern>./data/log/log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!--日志文件保留天数-->
<maxHistory>15</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
</appender>
<!-- 时间滚动输出 level为 ERROR 日志 -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文件的路径及文件名 -->
<file>./data/log/log_error.log</file>
<!--日志文件输出格式-->
<encoder>
<pattern>%d{yy-MM-dd.HH:mm:ss.SSS} [%-16t] %-5p %-22c{0}%X{ServiceId} -%X{trace-id} %m%n</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>./data/log/log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- 日志文件保留天数【根据服务器预留,可自行调整】 -->
<maxHistory>7</maxHistory>
<totalSizeCap>5GB</totalSizeCap>
</rollingPolicy>
<!-- WARN 级别及以上 -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>WARN</level>
</filter>
</appender>
<!-- 异步输出 -->
<appender name="ASYNC_FILE_INFO" class="ch.qos.logback.classic.AsyncAppender">
<!-- 队列剩余容量小于discardingThreshold,则会丢弃TRACT、DEBUG、INFO级别的日志;默认值-1,为queueSize的20%;0不丢失日志 -->
<discardingThreshold>0</discardingThreshold>
<!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
<queueSize>8192</queueSize>
<!-- neverBlock:true 会丢失日志,但业务性能不受影响 -->
<neverBlock>true</neverBlock>
<!--是否提取调用者数据-->
<includeCallerData>false</includeCallerData>
<appender-ref ref="INFO_FILE"/>
</appender>
<appender name="ASYNC_FILE_ERROR" class="ch.qos.logback.classic.AsyncAppender">
<!-- 队列剩余容量小于discardingThreshold,则会丢弃TRACT、DEBUG、INFO级别的日志;默认值-1,为queueSize的20%;0不丢失日志 -->
<discardingThreshold>0</discardingThreshold>
<!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
<queueSize>1024</queueSize>
<!-- neverBlock:true 会丢失日志,但业务性能不受影响 -->
<neverBlock>true</neverBlock>
<!--是否提取调用者数据-->
<includeCallerData>false</includeCallerData>
<appender-ref ref="ERROR_FILE"/>
</appender>
<!-- 开发环境:控制台打印 -->
<springProfile name="dev">
<logger name="com.nmys.view" level="debug"/>
</springProfile>
<root level="info">
<appender-ref ref="CONSOLE"/>
<!-- 异步日志-INFO -->
<appender-ref ref="ASYNC_FILE_INFO"/>
<!-- 异步日志-ERROR -->
<appender-ref ref="ASYNC_FILE_ERROR"/>
</root>
</configuration>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration
PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
<!-- 暂时未使用 文档:https://mybatis.org/mybatis-3/zh/configuration.html#typeAliases -->
<typeAliases>
</typeAliases>
</configuration>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Mapper for querying the canal-synced ES index via the x-pack SQL JDBC driver.
     Columns use the underscore-prefixed names defined in the canal-adapter esMapping. -->
<mapper namespace="cn.bugstack.xfg.dev.tech.infrastructure.dao.elasticsearch.IElasticsearchUserOrderDao">
<resultMap id="dataMap" type="cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO">
<result column="_user_id" property="userId"/>
<result column="_user_name" property="userName"/>
<result column="_order_id" property="orderId"/>
<result column="_sku_name" property="skuName"/>
<result column="_update_time" property="updateTime"/>
<result column="_create_time" property="createTime"/>
</resultMap>
<!-- NOTE(review): despite the id, this ignores its argument and returns the 10
     earliest-updated orders across all users — confirm against callers.
     The index name contains a dot, so it must stay double-quoted in ES SQL. -->
<select id="selectByUserId" resultMap="dataMap">
select _user_id, _user_name, _order_id, _sku_name
from "xfg_dev_tech.user_order"
order by _update_time
limit 10
</select>
</mapper>
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Mapper over the logical user_order table; ShardingSphere routes to the
     physical user_order_0..3 tables across two databases. -->
<mapper namespace="cn.bugstack.xfg.dev.tech.infrastructure.dao.mysql.IMySQLUserOrderDao">
    <resultMap id="dataMap" type="cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO">
        <id column="id" property="id"/>
        <result column="user_name" property="userName"/>
        <result column="user_id" property="userId"/>
        <result column="user_mobile" property="userMobile"/>
        <result column="sku" property="sku"/>
        <result column="sku_name" property="skuName"/>
        <result column="order_id" property="orderId"/>
        <result column="quantity" property="quantity"/>
        <result column="unit_price" property="unitPrice"/>
        <result column="discount_amount" property="discountAmount"/>
        <result column="tax" property="tax"/>
        <result column="total_amount" property="totalAmount"/>
        <result column="order_date" property="orderDate"/>
        <result column="order_status" property="orderStatus"/>
        <result column="is_delete" property="isDelete"/>
        <result column="uuid" property="uuid"/>
        <result column="ipv4" property="ipv4"/>
        <result column="ipv6" property="ipv6"/>
        <result column="ext_data" property="extData"/>
        <result column="update_time" property="updateTime"/>
    </resultMap>
    <!-- Bug fix: this INSERT was declared as <select>, so MyBatis treated it as a
         query (no proper update-count handling, wrong executor semantics). -->
    <insert id="insert" parameterType="cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO">
        INSERT INTO user_order (user_name, user_id, user_mobile, sku, sku_name,
                                order_id, quantity, unit_price, discount_amount, tax,
                                total_amount, order_date, order_status, is_delete, uuid,
                                ipv4, ipv6, ext_data, update_time, create_time)
        VALUES (#{userName}, #{userId}, #{userMobile}, #{sku}, #{skuName},
                #{orderId}, #{quantity}, #{unitPrice}, #{discountAmount}, #{tax},
                #{totalAmount}, #{orderDate}, #{orderStatus}, #{isDelete}, #{uuid},
                INET_ATON(#{ipv4}), INET6_ATON(#{ipv6}), #{extData}, now(), now())
    </insert>
    <!-- Marks every order of the given user as status 1 (paid/confirmed per domain). -->
    <update id="updateOrderStatusByUserId" parameterType="java.lang.String">
        UPDATE user_order SET order_status = 1 WHERE user_id = #{userId}
    </update>
    <!-- user_id is the sharding column, so this routes to a single table. -->
    <select id="selectByUserId" parameterType="java.lang.String" resultMap="dataMap">
        SELECT id, user_name, user_id, user_mobile, sku, sku_name,
               order_id, quantity, unit_price, discount_amount, tax,
               total_amount, order_date, order_status, is_delete, uuid,
               ipv4, ipv6, ext_data, update_time, create_time
        FROM user_order
        WHERE user_id = #{userId}
    </select>
</mapper>
# https://shardingsphere.apache.org/index_zh.html
mode:
# 运行模式类型。可选配置:内存模式 Memory、单机模式 Standalone、集群模式 Cluster - 目前为单机模式
type: Standalone
dataSources:
ds_0:
dataSourceClassName: com.zaxxer.hikari.HikariDataSource
driverClassName: com.mysql.cj.jdbc.Driver
jdbcUrl: jdbc:mysql://127.0.0.1:13306/xfg_dev_tech_db_00?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&serverTimezone=UTC
username: root
password: 123456
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 15
minPoolSize: 5
ds_1:
dataSourceClassName: com.zaxxer.hikari.HikariDataSource
driverClassName: com.mysql.cj.jdbc.Driver
jdbcUrl: jdbc:mysql://127.0.0.1:13306/xfg_dev_tech_db_01?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&serverTimezone=UTC
username: root
password: 123456
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 15
minPoolSize: 5
rules:
- !SHARDING
# 库的路由
defaultDatabaseStrategy:
standard:
shardingColumn: user_id
shardingAlgorithmName: database_inline
# 表的路由
tables:
user_order:
actualDataNodes: ds_$->{0..1}.user_order_$->{0..3}
tableStrategy:
standard:
shardingColumn: user_id
shardingAlgorithmName: user_order_inline
# 路由算法
shardingAlgorithms:
# 库-路由算法 2是两个库,库的数量。库的数量用哈希模2来计算。
database_inline:
type: INLINE
props:
algorithm-expression: ds_$->{Math.abs(user_id.hashCode()) % 2}
# 表-路由算法 4是一个库里,表的数量。4 - 1 为了获得 011 这样的二进制值。不推荐 user_order_$->{Math.abs(user_id.hashCode()) % 2} 作为表的路由
user_order_inline:
type: INLINE
props:
algorithm-expression: user_order_$->{(user_id.hashCode() ^ (user_id.hashCode()) >>> 16) & (4 - 1)}
props:
# 是否在日志中打印 SQL。
# 打印 SQL 可以帮助开发者快速定位系统问题。日志内容包含:逻辑 SQL,真实 SQL 和 SQL 解析结果。
# 如果开启配置,日志将使用 Topic ShardingSphere-SQL,日志级别是 INFO。 false
sql-show: true
# 是否在日志中打印简单风格的 SQL。false
sql-simple: true
# 用于设置任务处理线程池的大小。每个 ShardingSphereDataSource 使用一个独立的线程池,同一个 JVM 的不同数据源不共享线程池。
executor-size: 20
# 查询请求在每个数据库实例中所能使用的最大连接数。1
max-connections-size-per-query: 1
# 在程序启动和更新时,是否检查分片元数据的结构一致性。
check-table-metadata-enabled: false
# 在程序启动和更新时,是否检查重复表。false
check-duplicate-table-enabled: false
\ No newline at end of file
package cn.bugstack.xfg.dev.tech.test;
import cn.bugstack.xfg.dev.tech.infrastructure.dao.mysql.IMySQLUserOrderDao;
import cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO;
import com.alibaba.fastjson2.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import javax.annotation.Resource;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.*;
import java.util.Date;
import java.util.List;
import java.util.UUID;
@Slf4j
@RunWith(SpringRunner.class)
@SpringBootTest
public class ApiTest {
public static void main(String[] args) {
System.out.println(10001L % 2);
}
@Resource
private IMySQLUserOrderDao userOrderDao;
@Test
public void test_selectByUserId() {
List<UserOrderPO> list = userOrderDao.selectByUserId("xfg_PrmgwQ");
log.info("测试结果:{}", JSON.toJSONString(list));
}
@Test
public void test_insert() throws InterruptedException {
for (int i = 0; i < 3; i++) {
UserOrderPO userOrderPO = UserOrderPO.builder()
.userName("小傅哥")
.userId("xfg_" + RandomStringUtils.randomAlphabetic(6))
.userMobile("+86 13521408***")
.sku("13811216")
.skuName("《手写MyBatis:渐进式源码实践》")
.orderId(RandomStringUtils.randomNumeric(11))
.quantity(1)
.unitPrice(BigDecimal.valueOf(128))
.discountAmount(BigDecimal.valueOf(50))
.tax(BigDecimal.ZERO)
.totalAmount(BigDecimal.valueOf(78))
.orderDate(new Date())
.orderStatus(0)
.isDelete(0)
.uuid(UUID.randomUUID().toString().replace("-", ""))
.ipv4("127.0.0.1")
.ipv6("2001:0db8:85a3:0000:0000:8a2e:0370:7334".getBytes())
.extData("{\"device\": {\"machine\": \"IPhone 14 Pro\", \"location\": \"shanghai\"}}")
.build();
userOrderDao.insert(userOrderPO);
Thread.sleep(100);
}
}
/**
* 路由测试
*/
@Test
public void test_idx() {
for (int i = 0; i < 50; i++) {
String user_id = "xfg_" + RandomStringUtils.randomAlphabetic(6);
log.info("测试结果 {}", (user_id.hashCode() ^ (user_id.hashCode()) >>> 16) & 3);
}
}
/**
* 在 Redisson 中,当你调用 getMap 方法时,如果指定的 key 不存在,Redisson 并不会立即在 Redis 数据库中创建这个 key。相反,它会返回一个 RMap 对象的实例,这个实例是一个本地的 Java 对象,它代表了 Redis 中的一个哈希(hash)。
* <p>
* 当你开始使用这个 RMap 实例进行操作,比如添加键值对,那么 Redisson 会在 Redis 数据库中创建相应的 key,并将数据存储在这个 key 对应的哈希中。如果你只是获取了 RMap 实例而没有进行任何操作,那么在 Redis 数据库中是不会有任何变化的。
* <p>
* 简单来说,getMap 方法返回的 RMap 对象是懒加载的,只有在你实际进行操作时,Redis 数据库中的数据结构才会被创建或修改。
*/
@Test
public void test_query_es() throws IOException {
String address = "jdbc:es://http://127.0.0.1:9200";
Connection connection = null;
try {
connection = DriverManager.getConnection(address);
Statement statement = connection.createStatement();
ResultSet results = statement.executeQuery("select * from \"xfg_dev_tech.user_order\"");
while (results.next()) {
System.out.println(results.getString(1));
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
if (connection != null && !connection.isClosed()) {
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}
}
package cn.bugstack.xfg.dev.tech.test.elasticsearch;
import cn.bugstack.xfg.dev.tech.infrastructure.dao.elasticsearch.IElasticsearchUserOrderDao;
import cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO;
import com.alibaba.fastjson2.JSON;
import lombok.extern.slf4j.Slf4j;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import javax.annotation.Resource;
import java.util.List;
@Slf4j
@RunWith(SpringRunner.class)
@SpringBootTest
public class UserOrderDaoTest {

    @Resource
    private IElasticsearchUserOrderDao userOrderDao;

    /**
     * Smoke test: pulls the canal-synced orders out of Elasticsearch and
     * dumps the result as JSON for manual inspection.
     */
    @Test
    public void test() {
        final List<UserOrderPO> syncedOrders = userOrderDao.selectByUserId();
        log.info("测试结果:{}", JSON.toJSONString(syncedOrders));
    }
}
package cn.bugstack.xfg.dev.tech.test.mysql;
import cn.bugstack.xfg.dev.tech.infrastructure.dao.mysql.IMySQLUserOrderDao;
import cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO;
import com.alibaba.fastjson2.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import javax.annotation.Resource;
import java.math.BigDecimal;
import java.util.Date;
import java.util.List;
import java.util.UUID;
@Slf4j
@RunWith(SpringRunner.class)
@SpringBootTest
public class UserOrderDaoTest {

    @Resource
    private IMySQLUserOrderDao userOrderDao;

    /**
     * Looks up one user's orders; ShardingSphere routes the query by user_id
     * to the matching database and table shard.
     */
    @Test
    public void test_selectByUserId() {
        final List<UserOrderPO> orders = userOrderDao.selectByUserId("xfg_PrmgwQ");
        log.info("测试结果:{}", JSON.toJSONString(orders));
    }

    /**
     * Inserts three randomized orders, pausing briefly between writes so the
     * resulting binlog events are easy to follow in canal.
     *
     * @throws InterruptedException if the pacing sleep is interrupted
     */
    @Test
    public void test_insert() throws InterruptedException {
        int remaining = 3;
        while (remaining-- > 0) {
            userOrderDao.insert(buildRandomOrder());
            Thread.sleep(100);
        }
    }

    /** Builds one demo order with a random user id / order id and fixed sample fields. */
    private UserOrderPO buildRandomOrder() {
        return UserOrderPO.builder()
                .userName("小傅哥")
                .userId("xfg_" + RandomStringUtils.randomAlphabetic(6))
                .userMobile("+86 13521408***")
                .sku("13811216")
                .skuName("《手写MyBatis:渐进式源码实践》")
                .orderId(RandomStringUtils.randomNumeric(11))
                .quantity(1)
                .unitPrice(BigDecimal.valueOf(128))
                .discountAmount(BigDecimal.valueOf(50))
                .tax(BigDecimal.ZERO)
                .totalAmount(BigDecimal.valueOf(78))
                .orderDate(new Date())
                .orderStatus(0)
                .isDelete(0)
                .uuid(UUID.randomUUID().toString().replace("-", ""))
                .ipv4("127.0.0.1")
                .ipv6("2001:0db8:85a3:0000:0000:8a2e:0370:7334".getBytes())
                .extData("{\"device\": {\"machine\": \"IPhone 14 Pro\", \"location\": \"shanghai\"}}")
                .build();
    }
}
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>cn.bugstack</groupId>
<artifactId>xfg-dev-tech-canal</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>xfg-dev-tech-infrastructure</artifactId>
<dependencies>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</dependency>
</dependencies>
<build>
<finalName>xfg-dev-tech-infrastructure</finalName>
<plugins>
<!-- Compiler plugin -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<compilerVersion>${java.version}</compilerVersion>
</configuration>
</plugin>
</plugins>
</build>
</project>
\ No newline at end of file
package cn.bugstack.xfg.dev.tech.infrastructure.dao.elasticsearch;
import cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO;
import org.apache.ibatis.annotations.Mapper;
import java.util.List;
/**
 * MyBatis mapper that reads user orders back from Elasticsearch (the canal
 * sync target). The query itself lives in the corresponding mapper XML.
 */
@Mapper
public interface IElasticsearchUserOrderDao {

    /**
     * Returns synced orders from Elasticsearch.
     * NOTE(review): despite the name, this method takes no user id — the query
     * presumably returns all (or a fixed subset of) documents; confirm against
     * the mapper XML and consider renaming or adding the parameter.
     */
    List<UserOrderPO> selectByUserId();
}
package cn.bugstack.xfg.dev.tech.infrastructure.dao.mysql;
import cn.bugstack.xfg.dev.tech.infrastructure.po.UserOrderPO;
import org.apache.ibatis.annotations.Mapper;
import java.util.List;
/**
 * MyBatis mapper for the sharded user_order tables in MySQL.
 * ShardingSphere routes each statement by user_id to one of the
 * xfg_dev_tech_db_00/01 databases and their user_order_0..3 tables
 * (per the INLINE algorithms in the sharding config).
 */
@Mapper
public interface IMySQLUserOrderDao {

    /** Inserts one order row; the target shard is derived from userOrderPO's userId. */
    void insert(UserOrderPO userOrderPO);

    /** Updates the status of the given user's orders — exact SET clause is in the mapper XML; verify there. */
    void updateOrderStatusByUserId(String userId);

    /** Returns the orders of the given user, routed to the shard userId hashes to. */
    List<UserOrderPO> selectByUserId(String userId);
}
package cn.bugstack.xfg.dev.tech.infrastructure.po;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.math.BigDecimal;
import java.util.Date;
/**
 * Persistence object for one user order row (user_order_* tables);
 * also the record shape that canal syncs to Elasticsearch.
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class UserOrderPO {
    /** Auto-increment primary key */
    private Long id;
    /** User name */
    private String userName;
    /** User id (the sharding column for both database and table routing) */
    private String userId;
    /** User phone number */
    private String userMobile;
    /** SKU code */
    private String sku;
    /** SKU name */
    private String skuName;
    /** Order id */
    private String orderId;
    /** Quantity */
    private int quantity;
    /** Unit price */
    private BigDecimal unitPrice;
    /** Discount amount */
    private BigDecimal discountAmount;
    /** Tax amount */
    private BigDecimal tax;
    /** Total (paid) amount */
    private BigDecimal totalAmount;
    /** Order date */
    private Date orderDate;
    /** Order status */
    private int orderStatus;
    /** Logical-delete flag */
    private int isDelete;
    /** Unique index value */
    private String uuid;
    /** Device IPv4 address */
    private String ipv4;
    /** Device IPv6 address (raw bytes) */
    private byte[] ipv6;
    /** Extension data (JSON string) */
    private String extData;
    /** Update time */
    private Date updateTime;
    /** Create time */
    private Date createTime;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册