Commit 8846c078 authored by Shengliang Guan

Merge remote-tracking branch 'origin/develop' into feature/wal

......@@ -90,7 +90,7 @@ TDengine uses millisecond-precision timestamps by default, but this can be changed via the configuration parameter enableMic
```mysql
ALTER DATABASE db_name REPLICA 2;
```
The REPLICA parameter changes the number of replicas of the database; the valid range is [1, 3]. When used in a cluster, the number of replicas must be less than the number of dnodes.
The REPLICA parameter changes the number of replicas of the database; the valid range is [1, 3]. When used in a cluster, the number of replicas must be less than or equal to the number of dnodes.
```mysql
ALTER DATABASE db_name KEEP 365;
......
......@@ -5,14 +5,13 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.9</version>
<version>2.0.10</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
......
In this example, we adapt the common Java connection pools:
* c3p0
* dbcp
* druid
* HikariCP
### Description
Program logic of ConnectionPoolDemo:
1. Create a connection pool to the given host
2. Create a database named pool_test, create the super table weather, and create tableSize sub-tables (see the SQL sketch after this list)
3. Continuously insert into all sub-tables.
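For reference, step 2 boils down to SQL along these lines (mirroring the init() method in ConnectionPoolDemo.java below; the tag values are the demo's own):
```mysql
DROP DATABASE IF EXISTS pool_test;
CREATE DATABASE IF NOT EXISTS pool_test;
USE pool_test;
CREATE TABLE weather(ts TIMESTAMP, temperature FLOAT, humidity INT) TAGS(location NCHAR(64), groupId INT);
-- one sub-table per index, from t_1 up to t_<tableSize>
CREATE TABLE t_1 USING weather TAGS('beijing', 2);
```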
### How to run this example:
```shell script
# mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args="-host localhost"
```
Run the main method of ConnectionPoolDemo with mvn; the following parameters can be specified:
```shell script
Usage:
mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args="<args>"
-host : hostname
-poolType <c3p0 | dbcp | druid | hikari>
-poolSize <poolSize>
-tableSize <tableSize>
-batchSize : number of values in each INSERT SQL statement
-sleep : time to sleep (ms) after each insert task is submitted
```
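For example, a run that exercises the druid pool might look like this (the parameter values here are illustrative, not recommendations):
```shell script
mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args="-host localhost -poolType druid -poolSize 20 -tableSize 100 -batchSize 10 -sleep 500"
```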
### How to stop the program:
ConnectionPoolDemo never stops on its own; it keeps inserting until you terminate it manually with Ctrl+C.
### Logging
log4j is used; regular log output goes to debug.log and errors go to error.log.
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.demo</groupId>
<artifactId>connectionPools</artifactId>
<version>1.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.11</version>
</dependency>
<!-- druid -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.17</version>
</dependency>
<!-- HikariCP -->
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.2.0</version>
</dependency>
<!-- dbcp Connection Pool -->
<dependency>
<groupId>commons-pool</groupId>
<artifactId>commons-pool</artifactId>
<version>1.5.4</version>
</dependency>
<dependency>
<groupId>commons-dbcp</groupId>
<artifactId>commons-dbcp</artifactId>
<version>1.4</version>
</dependency>
<!-- c3p0 -->
<dependency>
<groupId>com.mchange</groupId>
<artifactId>c3p0</artifactId>
<version>0.9.5.2</version>
</dependency>
<!-- log4j -->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
</dependencies>
</project>
\ No newline at end of file
package com.taosdata.demo;
import com.taosdata.demo.common.InsertTask;
import com.taosdata.demo.pool.C3p0Builder;
import com.taosdata.demo.pool.DbcpBuilder;
import com.taosdata.demo.pool.DruidPoolBuilder;
import com.taosdata.demo.pool.HikariCpBuilder;
import org.apache.log4j.Logger;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
public class ConnectionPoolDemo {
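// Demo defaults; each of these can be overridden with the matching command-line flag (see the README).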
private static Logger logger = Logger.getLogger(ConnectionPoolDemo.class);
private static final String dbName = "pool_test";
private static int batchSize = 10;
private static int sleep = 1000;
private static int poolSize = 50;
private static int tableSize = 1000;
private static int threadCount = 50;
private static String poolType = "hikari";
public static void main(String[] args) throws InterruptedException {
String host = null;
for (int i = 0; i < args.length; i++) {
if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) {
host = args[++i];
}
if ("-batchSize".equalsIgnoreCase(args[i]) && i < args.length - 1) {
batchSize = Integer.parseInt(args[++i]);
}
if ("-sleep".equalsIgnoreCase(args[i]) && i < args.length - 1) {
sleep = Integer.parseInt(args[++i]);
}
if ("-poolSize".equalsIgnoreCase(args[i]) && i < args.length - 1) {
poolSize = Integer.parseInt(args[++i]);
}
if ("-tableSize".equalsIgnoreCase(args[i]) && i < args.length - 1) {
tableSize = Integer.parseInt(args[++i]);
}
if ("-poolType".equalsIgnoreCase(args[i]) && i < args.length - 1) {
poolType = args[++i];
}
}
if (host == null) {
System.out.println("Usage: java -jar XXX.jar " +
"-host <hostname> " +
"-batchSize <batchSize> " +
"-sleep <sleep> " +
"-poolSize <poolSize> " +
"-tableSize <tableSize>" +
"-poolType <c3p0| dbcp| druid| hikari>");
return;
}
DataSource dataSource;
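// Select the pool implementation; any unrecognized -poolType falls back to HikariCP.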
switch (poolType) {
case "c3p0":
dataSource = C3p0Builder.getDataSource(host, poolSize);
break;
case "dbcp":
dataSource = DbcpBuilder.getDataSource(host, poolSize);
break;
case "druid":
dataSource = DruidPoolBuilder.getDataSource(host, poolSize);
break;
case "hikari":
default:
dataSource = HikariCpBuilder.getDataSource(host, poolSize);
poolType = "hikari";
}
logger.info(">>>>>>>>>>>>>> connection pool Type: " + poolType);
init(dataSource);
ExecutorService executor = Executors.newFixedThreadPool(threadCount);
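// Submit insert tasks indefinitely; the demo only stops when the process is killed (Ctrl+C).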
while (true) {
executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize));
if (sleep > 0)
TimeUnit.MILLISECONDS.sleep(sleep);
}
}
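// Recreate the demo database, the super table 'weather', and tableSize sub-tables.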
private static void init(DataSource dataSource) {
try (Connection conn = dataSource.getConnection()) {
execute(conn, "drop database if exists " + dbName + "");
execute(conn, "create database if not exists " + dbName + "");
execute(conn, "use " + dbName + "");
execute(conn, "create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)");
for (int tb_ind = 1; tb_ind <= tableSize; tb_ind++) {
execute(conn, "create table t_" + tb_ind + " using weather tags('beijing'," + (tb_ind + 1) + ")");
}
logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>> init finished.");
} catch (SQLException e) {
e.printStackTrace();
}
}
private static void execute(Connection con, String sql) {
try (Statement stmt = con.createStatement()) {
stmt.executeUpdate(sql);
logger.info("SQL >>> " + sql);
} catch (SQLException e) {
e.printStackTrace();
}
}
}
package com.taosdata.demo.common;
import org.apache.log4j.Logger;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Random;
public class InsertTask implements Runnable {
private final Random random = new Random(System.currentTimeMillis());
private static final Logger logger = Logger.getLogger(InsertTask.class);
private final DataSource ds;
private final int batchSize;
private final String dbName;
private final int tableSize;
public InsertTask(DataSource ds, String dbName, int tableSize, int batchSize) {
this.ds = ds;
this.dbName = dbName;
this.tableSize = tableSize;
this.batchSize = batchSize;
}
@Override
public void run() {
Connection conn = null;
Statement stmt = null;
int affectedRows = 0;
long start = System.currentTimeMillis();
try {
conn = ds.getConnection();
stmt = conn.createStatement();
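// Build one multi-row INSERT per sub-table; timestamps start at the task start time and advance 1 ms per row.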
for (int tb_index = 1; tb_index <= tableSize; tb_index++) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ");
sb.append(dbName);
sb.append(".t_");
sb.append(tb_index);
sb.append("(ts, temperature, humidity) values ");
for (int i = 0; i < batchSize; i++) {
sb.append("(");
sb.append(start + i);
sb.append(", ");
sb.append(random.nextFloat() * 30);
sb.append(", ");
sb.append(random.nextInt(70));
sb.append(") ");
}
logger.info("SQL >>> " + sb.toString());
affectedRows += stmt.executeUpdate(sb.toString());
}
} catch (SQLException e) {
e.printStackTrace();
} finally {
if (stmt != null) {
try {
stmt.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
logger.info(">>> affectedRows:" + affectedRows + " TimeCost:" + (System.currentTimeMillis() - start) + " ms");
}
}
}
package com.taosdata.demo.pool;
import com.mchange.v2.c3p0.ComboPooledDataSource;
import javax.sql.DataSource;
import java.beans.PropertyVetoException;
public class C3p0Builder {
public static DataSource getDataSource(String host, int poolSize) {
ComboPooledDataSource ds = new ComboPooledDataSource();
try {
ds.setDriverClass("com.taosdata.jdbc.TSDBDriver");
} catch (PropertyVetoException e) {
e.printStackTrace();
}
ds.setJdbcUrl("jdbc:TAOS://" + host + ":6030");
ds.setUser("root");
ds.setPassword("taosdata");
ds.setMinPoolSize(poolSize);
ds.setMaxPoolSize(poolSize);
ds.setAcquireIncrement(5);
return ds;
}
}
package com.taosdata.demo.pool;
import org.apache.commons.dbcp.BasicDataSource;
import javax.sql.DataSource;
public class DbcpBuilder {
public static DataSource getDataSource(String host, int poolSize) {
BasicDataSource ds = new BasicDataSource();
ds.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
ds.setUrl("jdbc:TAOS://" + host + ":6030");
ds.setUsername("root");
ds.setPassword("taosdata");
ds.setMaxActive(poolSize);
ds.setMinIdle(poolSize);
ds.setInitialSize(poolSize);
return ds;
}
}
package com.taosdata.demo.pool;
import com.alibaba.druid.pool.DruidDataSource;
import javax.sql.DataSource;
public class DruidPoolBuilder {
public static DataSource getDataSource(String host, int poolSize) {
final String url = "jdbc:TAOS://" + host + ":6030";
DruidDataSource dataSource = new DruidDataSource();
dataSource.setUrl(url);
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
// initial number of connections (default 0)
dataSource.setInitialSize(poolSize);
// maximum number of connections (default 8)
dataSource.setMaxActive(poolSize);
// minimum number of idle connections
dataSource.setMinIdle(poolSize);
// maximum wait time when acquiring a connection, in milliseconds
dataSource.setMaxWait(2000);
return dataSource;
}
}
package com.taosdata.demo.pool;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import javax.sql.DataSource;
public class HikariCpBuilder {
public static DataSource getDataSource(String host, int poolSize) {
HikariConfig config = new HikariConfig();
config.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
config.setJdbcUrl("jdbc:TAOS://" + host + ":6030");
config.setUsername("root");
config.setPassword("taosdata");
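// Keep the pool at a fixed size, matching the other pool builders.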
config.setMaximumPoolSize(poolSize);
config.setMinimumIdle(poolSize);
HikariDataSource ds = new HikariDataSource(config);
return ds;
}
}
### Settings ###
log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
### Print log messages to the console ###
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
### Log messages at DEBUG level and above to logs/debug.log ###
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DebugLog.File=logs/debug.log
log4j.appender.DebugLog.Append=true
log4j.appender.DebugLog.Threshold=DEBUG
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
### Log messages at ERROR level and above to logs/error.log ###
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
log4j.appender.ErrorLog.File=logs/error.log
log4j.appender.ErrorLog.Append=true
log4j.appender.ErrorLog.Threshold=ERROR
log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout
log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
\ No newline at end of file