diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
index 905d3b2cd72044e621c125641ca053d1cda809d9..760ebae4fc3b7ddd609d1bfb5689f51b05fc7cb7 100644
--- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
+++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
@@ -90,7 +90,7 @@ TDengine's default timestamp precision is millisecond, but it can be changed via the configuration parameter enableMic
 ```mysql
 ALTER DATABASE db_name REPLICA 2;
 ```
- The REPLICA parameter changes the number of replicas of the database; the valid range is [1, 3]. In a cluster, the number of replicas must be less than the number of dnodes.
+ The REPLICA parameter changes the number of replicas of the database; the valid range is [1, 3]. In a cluster, the number of replicas must be less than or equal to the number of dnodes.
 
 ```mysql
 ALTER DATABASE db_name KEEP 365;
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index 893290868ddc4d87e7052e2fe27a13461c02508d..51db837c7b1149bfc5dca6d69a953ceb6b3eb898 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,14 +5,13 @@
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.9</version>
+  <version>2.0.10</version>
   <packaging>jar</packaging>
 
   <name>JDBCDriver</name>
   <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
   <description>TDengine JDBC Driver</description>
-  <name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
diff --git a/tests/examples/JDBC/connectionPools/README-cn.md b/tests/examples/JDBC/connectionPools/README-cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..761596dfc55a3e2c9f449ed34fd72ac96c277512
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/README-cn.md
@@ -0,0 +1,33 @@
+In this example, we adapt the common Java connection pools:
+* c3p0
+* dbcp
+* druid
+* HikariCP
+
+### Description
+Program logic of ConnectionPoolDemo:
+1. Create a connection pool to the host.
+2. Create a database named pool_test, create the super table weather, and create tableSize sub-tables.
+3. Keep inserting into all sub-tables.
+
+### How to run this example:
+```shell script
+# mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args="-host localhost"
+```
+Run the main method of ConnectionPoolDemo with mvn; the following parameters can be specified:
+```shell script
+Usage:
+mvn exec:java -Dexec.mainClass="com.taosdata.demo.ConnectionPoolDemo" -Dexec.args=""
+-host      : hostname
+-poolType  : connection pool type (c3p0 | dbcp | druid | hikari)
+-poolSize  : size of the connection pool
+-tableSize : number of sub-tables
+-batchSize : number of values in each insert SQL statement
+-sleep     : sleep time (ms) after each insert task is submitted
+```
+
+### How to stop the program:
+ConnectionPoolDemo does not stop on its own; it keeps inserting until you terminate it manually with Ctrl+C.
+
+### Logging
+log4j is used; regular logs and errors are written to debug.log and error.log respectively.
\ No newline at end of file
diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..d117c59637b00ebc1894e533fc8fd6a4f99d203f
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/pom.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.taosdata.demo</groupId>
+    <artifactId>connectionPools</artifactId>
+    <version>1.0-SNAPSHOT</version>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.taosdata.jdbc</groupId>
+            <artifactId>taos-jdbcdriver</artifactId>
+            <version>2.0.11</version>
+        </dependency>
+        <!-- druid -->
+        <dependency>
+            <groupId>com.alibaba</groupId>
+            <artifactId>druid</artifactId>
+            <version>1.1.17</version>
+        </dependency>
+        <!-- HikariCP -->
+        <dependency>
+            <groupId>com.zaxxer</groupId>
+            <artifactId>HikariCP</artifactId>
+            <version>3.2.0</version>
+        </dependency>
+        <!-- dbcp -->
+        <dependency>
+            <groupId>commons-pool</groupId>
+            <artifactId>commons-pool</artifactId>
+            <version>1.5.4</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-dbcp</groupId>
+            <artifactId>commons-dbcp</artifactId>
+            <version>1.4</version>
+        </dependency>
+        <!-- c3p0 -->
+        <dependency>
+            <groupId>com.mchange</groupId>
+            <artifactId>c3p0</artifactId>
+            <version>0.9.5.2</version>
+        </dependency>
+        <!-- log4j -->
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <version>1.2.17</version>
+        </dependency>
+    </dependencies>
+
+</project>
\ No newline at end of file
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java
new file mode 100644
index 0000000000000000000000000000000000000000..79c0aacea740dcb6fca9780c7f64872c537c3225
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/ConnectionPoolDemo.java
@@ -0,0 +1,117 @@
+package com.taosdata.demo;
+
+import com.taosdata.demo.common.InsertTask;
+import com.taosdata.demo.pool.C3p0Builder;
+import com.taosdata.demo.pool.DbcpBuilder;
+import com.taosdata.demo.pool.DruidPoolBuilder;
+import com.taosdata.demo.pool.HikariCpBuilder;
+import org.apache.log4j.Logger;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+public class ConnectionPoolDemo {
+
+    private static Logger logger = Logger.getLogger(ConnectionPoolDemo.class);
+    private static final String dbName = "pool_test";
+
+    private static int batchSize = 10;
+    private static int sleep = 1000;
+    private static int poolSize = 50;
+    private static int tableSize = 1000;
+    private static int threadCount = 50;
+    private static String poolType = "hikari";
+
+
+    public static void main(String[] args) throws InterruptedException {
+        String host = null;
+        for (int i = 0; i < args.length; i++) {
+            if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                host = args[++i];
+            }
+            if ("-batchSize".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                batchSize = Integer.parseInt(args[++i]);
+            }
+            if ("-sleep".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                sleep = Integer.parseInt(args[++i]);
+            }
+            if ("-poolSize".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                poolSize = Integer.parseInt(args[++i]);
+            }
+            if ("-tableSize".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                tableSize = Integer.parseInt(args[++i]);
+            }
+            if ("-poolType".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                poolType = args[++i];
+            }
+        }
+        if (host == null) {
+            System.out.println("Usage: java -jar XXX.jar " +
+                    "-host " +
+                    "-batchSize " +
+                    "-sleep " +
+                    "-poolSize " +
+                    "-tableSize " +
+                    "-poolType ");
+            return;
+        }
+
+        DataSource dataSource;
+        switch (poolType) {
+            case "c3p0":
+                dataSource = C3p0Builder.getDataSource(host, poolSize);
+                break;
+            case "dbcp":
+                dataSource = DbcpBuilder.getDataSource(host, poolSize);
+                break;
+            case "druid":
+                dataSource = DruidPoolBuilder.getDataSource(host, poolSize);
+                break;
+            case "hikari":
+            default:
+                dataSource = HikariCpBuilder.getDataSource(host, poolSize);
+                poolType = "hikari";
+        }
+
+        logger.info(">>>>>>>>>>>>>> connection pool Type: " + poolType);
+
+        init(dataSource);
+
+        ExecutorService executor = Executors.newFixedThreadPool(threadCount);
+        while (true) {
+            executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize));
+            if (sleep > 0)
+                TimeUnit.MILLISECONDS.sleep(sleep);
+        }
+    }
+
+    private static void init(DataSource dataSource) {
+        try (Connection conn = dataSource.getConnection()) {
+            execute(conn, "drop database if exists " + dbName + "");
+            execute(conn, "create database if not exists " + dbName + "");
+            execute(conn, "use " + dbName + "");
+            execute(conn, "create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)");
+            for (int tb_ind = 1; tb_ind <= tableSize; tb_ind++) {
+                execute(conn, "create table t_" + tb_ind + " using weather tags('beijing'," + (tb_ind + 1) + ")");
+            }
+            logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>> init finished.");
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private static void execute(Connection con, String sql) {
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(sql);
+            logger.info("SQL >>> " + sql);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+}
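Each pool builder in this example exposes the same `getDataSource(String host, int poolSize)` method and returns a plain `javax.sql.DataSource`, which ConnectionPoolDemo hands to its InsertTask threads. A minimal sketch of borrowing a connection from that same DataSource directly might look as follows (the class name `PoolUsageSketch`, the `localhost` host, and the pool size are illustrative, and it assumes the demo has already created the `pool_test` database):

```java
import com.taosdata.demo.pool.HikariCpBuilder;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

public class PoolUsageSketch {
    public static void main(String[] args) throws Exception {
        // Any of the four builders could be substituted here; they share the same signature.
        DataSource ds = HikariCpBuilder.getDataSource("localhost", 10);
        try (Connection conn = ds.getConnection();   // borrow a connection from the pool
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select count(*) from pool_test.weather")) {
            while (rs.next()) {
                System.out.println("rows inserted so far: " + rs.getLong(1));
            }
        } // close() returns the connection to the pool instead of tearing it down
    }
}
```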
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/common/InsertTask.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/common/InsertTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..ed86acd6e9f8bfb8c862c1764e39f541d3f054eb
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/common/InsertTask.java
@@ -0,0 +1,77 @@
+package com.taosdata.demo.common;
+
+import org.apache.log4j.Logger;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Random;
+
+public class InsertTask implements Runnable {
+    private final Random random = new Random(System.currentTimeMillis());
+    private static final Logger logger = Logger.getLogger(InsertTask.class);
+
+    private final DataSource ds;
+    private final int batchSize;
+    private final String dbName;
+    private final int tableSize;
+
+    public InsertTask(DataSource ds, String dbName, int tableSize, int batchSize) {
+        this.ds = ds;
+        this.dbName = dbName;
+        this.tableSize = tableSize;
+        this.batchSize = batchSize;
+    }
+
+    @Override
+    public void run() {
+        Connection conn = null;
+        Statement stmt = null;
+        int affectedRows = 0;
+
+        long start = System.currentTimeMillis();
+        try {
+            conn = ds.getConnection();
+            stmt = conn.createStatement();
+
+            for (int tb_index = 1; tb_index <= tableSize; tb_index++) {
+                StringBuilder sb = new StringBuilder();
+                sb.append("insert into ");
+                sb.append(dbName);
+                sb.append(".t_");
+                sb.append(tb_index);
+                sb.append("(ts, temperature, humidity) values ");
+                for (int i = 0; i < batchSize; i++) {
+                    sb.append("(");
+                    sb.append(start + i);
+                    sb.append(", ");
+                    sb.append(random.nextFloat() * 30);
+                    sb.append(", ");
+                    sb.append(random.nextInt(70));
+                    sb.append(") ");
+                }
+                logger.info("SQL >>> " + sb.toString());
+                affectedRows += stmt.executeUpdate(sb.toString());
+            }
+        } catch (SQLException e) {
+            e.printStackTrace();
+        } finally {
+            if (stmt != null) {
+                try {
+                    stmt.close();
+                } catch (SQLException e) {
+                    e.printStackTrace();
+                }
+            }
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException e) {
+                    e.printStackTrace();
+                }
+            }
+            logger.info(">>> affectedRows:" + affectedRows + " TimeCost:" + (System.currentTimeMillis() - start) + " ms");
+        }
+    }
+}
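InsertTask releases its Statement and Connection in a finally block. The same cleanup could also be written with try-with-resources; a minimal sketch under that assumption (`InsertOnce` and its method are illustrative, not part of the example project):

```java
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class InsertOnce {
    // Same responsibility as the body of InsertTask.run(), with try-with-resources
    // handling the close calls; closing a pooled Connection only returns it to the pool.
    static int insert(DataSource ds, String sql) {
        try (Connection conn = ds.getConnection();
             Statement stmt = conn.createStatement()) {
            return stmt.executeUpdate(sql);
        } catch (SQLException e) {
            e.printStackTrace();
            return 0;
        }
    }
}
```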
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/C3p0Builder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/C3p0Builder.java
new file mode 100644
index 0000000000000000000000000000000000000000..587f417410f96f43be2ced5a4820cd49cdb99a17
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/C3p0Builder.java
@@ -0,0 +1,28 @@
+package com.taosdata.demo.pool;
+
+import com.mchange.v2.c3p0.ComboPooledDataSource;
+import org.apache.commons.dbcp.BasicDataSource;
+
+import javax.sql.DataSource;
+import java.beans.PropertyVetoException;
+
+public class C3p0Builder {
+
+    public static DataSource getDataSource(String host, int poolSize) {
+        ComboPooledDataSource ds = new ComboPooledDataSource();
+
+        try {
+            ds.setDriverClass("com.taosdata.jdbc.TSDBDriver");
+        } catch (PropertyVetoException e) {
+            e.printStackTrace();
+        }
+        ds.setJdbcUrl("jdbc:TAOS://" + host + ":6030");
+        ds.setUser("root");
+        ds.setPassword("taosdata");
+
+        ds.setMinPoolSize(poolSize);
+        ds.setMaxPoolSize(poolSize);
+        ds.setAcquireIncrement(5);
+        return ds;
+    }
+}
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DbcpBuilder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DbcpBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..3c34a32532f595bf3134942094e96e952bd09dbb
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DbcpBuilder.java
@@ -0,0 +1,21 @@
+package com.taosdata.demo.pool;
+
+import org.apache.commons.dbcp.BasicDataSource;
+
+import javax.sql.DataSource;
+
+public class DbcpBuilder {
+
+    public static DataSource getDataSource(String host, int poolSize) {
+        BasicDataSource ds = new BasicDataSource();
+        ds.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+        ds.setUrl("jdbc:TAOS://" + host + ":6030");
+        ds.setUsername("root");
+        ds.setPassword("taosdata");
+
+        ds.setMaxActive(poolSize);
+        ds.setMinIdle(poolSize);
+        ds.setInitialSize(poolSize);
+        return ds;
+    }
+}
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DruidPoolBuilder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DruidPoolBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..e5dc14c6a5ef69c2a7059d5d78b621e25ff3d799
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/DruidPoolBuilder.java
@@ -0,0 +1,31 @@
+package com.taosdata.demo.pool;
+
+import com.alibaba.druid.pool.DruidDataSource;
+
+import javax.sql.DataSource;
+
+public class DruidPoolBuilder {
+
+    public static DataSource getDataSource(String host, int poolSize) {
+        final String url = "jdbc:TAOS://" + host + ":6030";
+
+        DruidDataSource dataSource = new DruidDataSource();
+        dataSource.setUrl(url);
+        dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+        dataSource.setUsername("root");
+        dataSource.setPassword("taosdata");
+
+        // initial number of connections, default 0
+        dataSource.setInitialSize(poolSize);
+        // maximum number of connections, default 8
+        dataSource.setMaxActive(poolSize);
+        // minimum number of idle connections
+        dataSource.setMinIdle(poolSize);
+        // maximum wait time for acquiring a connection, in milliseconds
+        dataSource.setMaxWait(2000);
+
+        return dataSource;
+    }
+
+
+}
diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/HikariCpBuilder.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/HikariCpBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..87f1f4ad2cbba41a779f0247f2214ef2bf04a8ca
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/demo/pool/HikariCpBuilder.java
@@ -0,0 +1,22 @@
+package com.taosdata.demo.pool;
+
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+
+import javax.sql.DataSource;
+
+public class HikariCpBuilder {
+
+    public static DataSource getDataSource(String host, int poolSize) {
+        HikariConfig config = new HikariConfig();
+        config.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+        config.setJdbcUrl("jdbc:TAOS://" + host + ":6030");
+        config.setUsername("root");
+        config.setPassword("taosdata");
+
+        config.setMaximumPoolSize(poolSize);
+        config.setMinimumIdle(poolSize);
+        HikariDataSource ds = new HikariDataSource(config);
+        return ds;
+    }
+}
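None of the four builders above configures connection validation. If stale connections ever become a concern, the HikariCP builder could be extended roughly as follows (a sketch only: `ValidatedHikariBuilder` is not part of the example, and `select server_status()` is assumed here to be a lightweight TDengine health-check query):

```java
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

import javax.sql.DataSource;

public class ValidatedHikariBuilder {

    // Same pool settings as HikariCpBuilder, plus an explicit test query so that
    // broken connections are detected before they are handed out to callers.
    public static DataSource getDataSource(String host, int poolSize) {
        HikariConfig config = new HikariConfig();
        config.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
        config.setJdbcUrl("jdbc:TAOS://" + host + ":6030");
        config.setUsername("root");
        config.setPassword("taosdata");

        config.setMaximumPoolSize(poolSize);
        config.setMinimumIdle(poolSize);
        config.setConnectionTestQuery("select server_status()"); // assumed TDengine health check
        return new HikariDataSource(config);
    }
}
```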
diff --git a/tests/examples/JDBC/connectionPools/src/main/resources/log4j.properties b/tests/examples/JDBC/connectionPools/src/main/resources/log4j.properties
new file mode 100644
index 0000000000000000000000000000000000000000..1299357be3d2e99ca6b79227f14ca7a587718914
--- /dev/null
+++ b/tests/examples/JDBC/connectionPools/src/main/resources/log4j.properties
@@ -0,0 +1,21 @@
+### Settings ###
+log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
+### Console output ###
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
+### Write logs at DEBUG level and above to logs/debug.log
+log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DebugLog.File=logs/debug.log
+log4j.appender.DebugLog.Append=true
+log4j.appender.DebugLog.Threshold=DEBUG
+log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
+### Write logs at ERROR level and above to logs/error.log
+log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.ErrorLog.File=logs/error.log
+log4j.appender.ErrorLog.Append=true
+log4j.appender.ErrorLog.Threshold=ERROR
+log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
\ No newline at end of file
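With this configuration, messages at DEBUG level and above go to the console and to logs/debug.log, while ERROR messages additionally land in logs/error.log. A tiny sketch of how that plays out for the demo's loggers (the class name is hypothetical):

```java
import org.apache.log4j.Logger;

public class LoggingSketch {
    private static final Logger logger = Logger.getLogger(LoggingSketch.class);

    public static void main(String[] args) {
        logger.debug("appears on the console and in logs/debug.log");
        logger.error("appears on the console, in logs/debug.log, and in logs/error.log");
    }
}
```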