Commit c4c7402f authored by: Y yihaoDeng

Merge branch 'master' of https://github.com/taosdata/TDengine

......@@ -11,7 +11,7 @@ steps:
image: gcc
commands:
- apt-get update
- apt-get install -y cmake build-essential git
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake ..
......@@ -82,7 +82,7 @@ platform:
steps:
- name: build
image: gcc
image: arm32v7/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.20.0")
SET(TD_VER_NUMBER "2.0.20.2")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
name: tdengine
base: core18
version: '2.0.20.0'
version: '2.0.20.2'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.20.0
- usr/lib/libtaos.so.2.0.20.2
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
......
package com.taosdata.jdbc.rs;
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;
import com.taosdata.jdbc.utils.Utils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.nio.charset.Charset;
import java.sql.*;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement {
......
......@@ -67,7 +67,7 @@ public class Utils {
findPlaceholderPosition(preparedSql, placeholderPositions);
findClauseRangeSet(preparedSql, clause, clauseRangeSet);
return transformSql(preparedSql, parameters, placeholderPositions, clauseRangeSet);
return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet);
}
private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet<Integer> clauseRangeSet) {
......@@ -95,14 +95,14 @@ public class Utils {
/***
*
* @param preparedSql
* @param rawSql
* @param paramArr
* @param placeholderPosition
* @param clauseRangeSet
* @return
*/
private static String transformSql(String preparedSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
String[] sqlArr = preparedSql.split("\\?");
private static String transformSql(String rawSql, Object[] paramArr, Map<Integer, Integer> placeholderPosition, RangeSet<Integer> clauseRangeSet) {
String[] sqlArr = rawSql.split("\\?");
return IntStream.range(0, sqlArr.length).mapToObj(index -> {
if (index == paramArr.length)
return sqlArr[index];
......
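The Utils change above makes transformSql split the raw SQL on "?" and splice the bound parameters back in between the fragments (the clauseRangeSet part additionally skips placeholders that sit inside quoted clauses). A simplified C sketch of just the splicing idea, not the driver's implementation and without the quote-aware handling:

#include <stdio.h>
#include <string.h>

int main(void) {
    const char *raw_sql = "insert into weather values(?, ?, ?)";
    const char *params[] = { "1622505600000", "23.5", "'beijing'" };

    char out[256];
    size_t len = 0, p = 0;

    /* Copy the SQL verbatim, replacing each '?' with the next bound parameter. */
    for (const char *c = raw_sql; *c != '\0'; c++) {
        if (*c == '?' && p < sizeof(params) / sizeof(params[0])) {
            len += (size_t)snprintf(out + len, sizeof(out) - len, "%s", params[p++]);
        } else {
            if (len + 1 < sizeof(out)) out[len++] = *c;
        }
    }
    out[len] = '\0';

    printf("%s\n", out);  /* insert into weather values(1622505600000, 23.5, 'beijing') */
    return 0;
}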
......@@ -12,6 +12,7 @@ import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeTest {
Connection connection;
Statement statement;
String dbName = "test";
......@@ -19,62 +20,53 @@ public class SubscribeTest {
String host = "127.0.0.1";
String topic = "test";
@Before
public void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
for (int i = 0; i < 2; i++) {
ts += i;
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
statement.executeUpdate(sql);
}
} catch (ClassNotFoundException | SQLException e) {
return;
}
}
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
System.out.println(rawSql);
// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
TSDBConnection conn = connection.unwrap(TSDBConnection.class);
TSDBSubscribe subscribe = conn.subscribe(topic, rawSql, false);
int a = 0;
while (true) {
TimeUnit.MILLISECONDS.sleep(1000);
TSDBResultSet resSet = subscribe.consume();
while (resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf(i + ": " + resSet.getString(i) + "\t");
}
System.out.println("\n======" + a + "==========");
}
a++;
if (a >= 2) {
break;
}
resSet.close();
}
// int a = 0;
// while (true) {
// TimeUnit.MILLISECONDS.sleep(1000);
// TSDBResultSet resSet = subscribe.consume();
// while (resSet.next()) {
// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
// System.out.printf(i + ": " + resSet.getString(i) + "\t");
// }
// System.out.println("\n======" + a + "==========");
// }
// a++;
// if (a >= 2) {
// break;
// }
// resSet.close();
// }
//
// subscribe.close(true);
subscribe.close(true);
} catch (Exception e) {
e.printStackTrace();
}
}
@Before
public void createDatabase() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 100, 1)");
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + (ts + 1) + ", 101, 2)");
}
@After
public void close() {
try {
......@@ -86,6 +78,5 @@ public class SubscribeTest {
} catch (SQLException e) {
e.printStackTrace();
}
}
}
\ No newline at end of file
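The reworked SubscribeTest above unwraps a TSDBConnection, subscribes to a topic with a raw SQL statement, consumes the result set twice, and then closes the subscription while keeping progress. A rough C-client analogue of the same flow, assuming the TDengine 2.x C API (taos_connect, taos_subscribe, taos_consume, taos_print_row, taos_unsubscribe); the connection settings and table name are placeholders:

#include <stdio.h>
#include <unistd.h>
#include "taos.h"

int main(void) {
    taos_init();
    TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
    if (conn == NULL) return 1;

    /* restart=0 resumes from the last committed offset; interval is in ms */
    TAOS_SUB *sub = taos_subscribe(conn, 0, "test", "select * from test.t0;", NULL, NULL, 1000);
    if (sub == NULL) { taos_close(conn); return 1; }

    for (int round = 0; round < 2; round++) {
        sleep(1);
        TAOS_RES *res = taos_consume(sub);      /* rows that arrived since the last call */
        TAOS_ROW  row;
        int nfields = taos_num_fields(res);
        TAOS_FIELD *fields = taos_fetch_fields(res);
        while ((row = taos_fetch_row(res)) != NULL) {
            char line[1024] = {0};
            taos_print_row(line, row, fields, nfields);
            printf("%s\n====== %d ======\n", line, round);
        }
    }

    taos_unsubscribe(sub, 1);   /* keepProgress=1, like subscribe.close(true) */
    taos_close(conn);
    return 0;
}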
......@@ -345,6 +345,31 @@ public class InsertSpecialCharacterJniTest {
}
}
@Test
public void testCase12() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setString(2, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals("HelloTDengine", f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Before
public void before() throws SQLException {
try (Statement stmt = conn.createStatement()) {
......
......@@ -346,6 +346,31 @@ public class InsertSpecialCharacterRestfulTest {
}
}
@Test
public void testCase12() throws SQLException {
final long now = System.currentTimeMillis();
// insert
final String sql = "insert into " + tbname1 + "(ts, f1, f2) values(?, 'HelloTDengine', ?) ; ";
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
pstmt.setTimestamp(1, new Timestamp(now));
pstmt.setString(2, special_character_str_4);
int ret = pstmt.executeUpdate();
Assert.assertEquals(1, ret);
}
// query
final String query = "select * from " + tbname1;
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery(query);
rs.next();
long timestamp = rs.getTimestamp(1).getTime();
Assert.assertEquals(now, timestamp);
String f1 = new String(rs.getBytes(2));
Assert.assertEquals("HelloTDengine", f1);
String f2 = rs.getString(3);
Assert.assertEquals(special_character_str_4, f2);
}
}
@Before
public void before() throws SQLException {
try (Statement stmt = conn.createStatement()) {
......
......@@ -81,7 +81,7 @@ enum QUERY_MODE {
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*1024)
#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
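MAX_DATA_SIZE above gains 20 bytes of headroom for the timestamp string and punctuation, but the macro body (16*1024)+20 is left without outer parentheses, so it only expands safely when used as a standalone value. A small sketch of the expansion pitfall; SAFE_DATA_SIZE is a hypothetical name used only for the comparison:

#include <stdio.h>

#define MAX_DATA_SIZE   (16*1024)+20     /* expands without outer parentheses */
#define SAFE_DATA_SIZE  ((16*1024)+20)   /* fully parenthesized variant (hypothetical name) */

int main(void) {
    /* 2*MAX_DATA_SIZE expands to 2*(16*1024)+20 = 32788, not 2*16404 = 32808 */
    printf("2*MAX_DATA_SIZE  = %d\n", 2 * MAX_DATA_SIZE);
    printf("2*SAFE_DATA_SIZE = %d\n", 2 * SAFE_DATA_SIZE);
    return 0;
}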
......@@ -188,7 +188,7 @@ typedef struct {
/* Used by main to communicate with parse_opt. */
typedef struct SArguments_S {
char * metaFile;
int test_mode;
uint32_t test_mode;
char * host;
uint16_t port;
char * user;
......@@ -205,31 +205,31 @@ typedef struct SArguments_S {
bool verbose_print;
bool performance_print;
char * output_file;
int query_mode;
uint32_t query_mode;
char * datatype[MAX_NUM_DATATYPE + 1];
int len_of_binary;
int num_of_CPR;
int num_of_threads;
int64_t insert_interval;
uint32_t len_of_binary;
uint32_t num_of_CPR;
uint32_t num_of_threads;
uint64_t insert_interval;
int64_t query_times;
int64_t interlace_rows;
int64_t num_of_RPR; // num_of_records_per_req
int64_t max_sql_len;
int64_t num_of_tables;
int64_t num_of_DPT;
uint64_t interlace_rows;
uint64_t num_of_RPR; // num_of_records_per_req
uint64_t max_sql_len;
uint64_t num_of_tables;
uint64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
int method_of_delete;
uint32_t method_of_delete;
char ** arg_list;
int64_t totalInsertRows;
int64_t totalAffectedRows;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
} SArguments;
typedef struct SColumn_S {
char field[TSDB_COL_NAME_LEN + 1];
char dataType[MAX_TB_NAME_SIZE];
int dataLen;
char field[TSDB_COL_NAME_LEN + 1];
char dataType[MAX_TB_NAME_SIZE];
uint32_t dataLen;
char note[128];
} StrColumn;
......@@ -237,50 +237,50 @@ typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int64_t childTblCount;
bool childTblExists; // 0: no, 1: yes
int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
char childTblPrefix[MAX_TB_NAME_SIZE];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
int64_t childTblLimit;
int64_t childTblOffset;
uint64_t childTblOffset;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
int64_t interlaceRows; //
uint64_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
int64_t maxSqlLen; //
uint64_t maxSqlLen; //
int64_t insertInterval; // insert interval, will override global insert interval
int64_t insertRows;
uint64_t insertInterval; // insert interval, will override global insert interval
uint64_t insertRows;
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN+1];
char tagsFile[MAX_FILE_NAME_LEN+1];
int columnCount;
uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
int tagCount;
uint32_t tagCount;
StrColumn tags[MAX_TAG_COUNT];
char* childTblName;
char* colsOfCreateChildTable;
int64_t lenOfOneRow;
int64_t lenOfTagOfOneRow;
uint64_t lenOfOneRow;
uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
//int sampleRowCount;
//int sampleUsePos;
int tagSource; // 0: rand, 1: tag sample
uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
int tagSampleCount;
int tagUsePos;
uint32_t tagSampleCount;
uint32_t tagUsePos;
// statistics
int64_t totalInsertRows;
int64_t totalAffectedRows;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
} SSuperTable;
typedef struct {
......@@ -307,8 +307,8 @@ typedef struct {
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
int minRows;
int maxRows;
uint32_t minRows; // 0 means default
uint32_t maxRows; // 0 means default
int comp;
int walLevel;
int cacheLast;
......@@ -327,7 +327,7 @@ typedef struct SDataBase_S {
char dbName[MAX_DB_NAME_SIZE];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
int64_t superTblCount;
uint64_t superTblCount;
SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
} SDataBase;
......@@ -345,57 +345,57 @@ typedef struct SDbs_S {
bool do_aggreFunc;
bool queryMode;
int threadCount;
int threadCountByCreateTbl;
int dbCount;
uint32_t threadCount;
uint32_t threadCountByCreateTbl;
uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
// statistics
int64_t totalInsertRows;
int64_t totalAffectedRows;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
} SDbs;
typedef struct SpecifiedQueryInfo_S {
int64_t queryInterval; // 0: unlimit > 0 loop/s
int64_t concurrent;
int64_t sqlCount;
int mode; // 0: sync, 1: async
int64_t subscribeInterval; // ms
int64_t queryTimes;
uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint64_t concurrent;
uint64_t sqlCount;
uint32_t mode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
uint64_t queryTimes;
int subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
int64_t totalQueried;
uint64_t totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int64_t queryInterval; // 0: unlimit > 0 loop/s
int threadCnt;
int mode; // 0: sync, 1: async
int64_t subscribeInterval; // ms
uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint32_t threadCnt;
uint32_t mode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
int subscribeRestart;
int subscribeKeepProgress;
int64_t queryTimes;
int64_t childTblCount;
uint64_t queryTimes;
uint64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
int64_t sqlCount;
uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
int64_t totalQueried;
uint64_t totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
struct sockaddr_in serv_addr;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[MAX_DB_NAME_SIZE+1];
......@@ -403,47 +403,47 @@ typedef struct SQueryMetaInfo_S {
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
int64_t totalQueried;
uint64_t totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
TAOS *taos;
int threadID;
char db_name[MAX_DB_NAME_SIZE+1];
uint32_t time_precision;
char fp[4096];
char tb_prefix[MAX_TB_NAME_SIZE];
int64_t start_table_from;
int64_t end_table_to;
int64_t ntables;
int64_t data_of_rate;
int64_t start_time;
char* cols;
bool use_metric;
TAOS * taos;
int threadID;
char db_name[MAX_DB_NAME_SIZE+1];
uint32_t time_precision;
char fp[4096];
char tb_prefix[MAX_TB_NAME_SIZE];
uint64_t start_table_from;
uint64_t end_table_to;
uint64_t ntables;
uint64_t data_of_rate;
int64_t start_time;
char* cols;
bool use_metric;
SSuperTable* superTblInfo;
// for async insert
tsem_t lock_sem;
int64_t counter;
tsem_t lock_sem;
int64_t counter;
uint64_t st;
uint64_t et;
int64_t lastTs;
uint64_t lastTs;
// sample data
int64_t samplePos;
int64_t samplePos;
// statistics
int64_t totalInsertRows;
int64_t totalAffectedRows;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
// insert delay statistics
int64_t cntDelay;
int64_t totalDelay;
int64_t avgDelay;
int64_t maxDelay;
int64_t minDelay;
uint64_t cntDelay;
uint64_t totalDelay;
uint64_t avgDelay;
uint64_t maxDelay;
uint64_t minDelay;
// query
int64_t querySeq; // sequence number of sql command
uint64_t querySeq; // sequence number of sql command
} threadInfo;
#ifdef WINDOWS
......@@ -569,7 +569,7 @@ SArguments g_args = {
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
1024000, // max_sql_len
(1024*1024), // max_sql_len
10000, // num_of_tables
10000, // num_of_DPT
0, // abort
......@@ -666,11 +666,11 @@ static void printHelp() {
printf("%s%s%s%s\n", indent, "-q", indent,
"Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
printf("%s%s%s%s\n", indent, "-b", indent,
"The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.");
"The data_type of columns, default: INT,INT,INT,INT.");
printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
printf("%s%s%s%s\n", indent, "-l", indent,
"The number of columns per record. Default is 10.");
"The number of columns per record. Default is 4.");
printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
......@@ -725,7 +725,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "\n\t-c need a valid path following!\n");
exit(EXIT_FAILURE);
}
tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
} else if (strcmp(argv[i], "-h") == 0) {
if (argc == i+1) {
......@@ -967,9 +967,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
if (arguments->method_of_delete < 0
|| arguments->method_of_delete > 3) {
arguments->method_of_delete = 0;
if (arguments->method_of_delete > 3) {
errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n");
exit(EXIT_FAILURE);
}
} else if ((strcmp(argv[i], "--version") == 0) ||
(strcmp(argv[i], "-V") == 0)){
......@@ -1004,17 +1004,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
break;
printf("\n");
}
printf("# Insertion interval: %"PRId64"\n",
printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
printf("# Number of records per req: %"PRId64"\n",
printf("# Number of records per req: %"PRIu64"\n",
arguments->num_of_RPR);
printf("# Max SQL length: %"PRId64"\n",
printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
printf("# Length of Binary: %d\n", arguments->len_of_binary);
printf("# Number of Threads: %d\n", arguments->num_of_threads);
printf("# Number of Tables: %"PRId64"\n",
printf("# Number of Tables: %"PRIu64"\n",
arguments->num_of_tables);
printf("# Number of Data per Table: %"PRId64"\n",
printf("# Number of Data per Table: %"PRIu64"\n",
arguments->num_of_DPT);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
......@@ -1191,13 +1191,31 @@ static float rand_float(){
return randfloat[cursor];
}
#if 0
static const char charNum[] = "0123456789";
static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose
static void nonrand_string(char *str, int size)
{
str[0] = 0;
if (size > 0) {
int n;
for (n = 0; n < size; n++) {
str[n] = charNum[n % 10];
}
str[n] = 0;
}
}
#endif
static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
static void rand_string(char *str, int size) {
str[0] = 0;
if (size > 0) {
//--size;
int n;
for (n = 0; n < size - 1; n++) {
for (n = 0; n < size; n++) {
int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
str[n] = charset[key];
}
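rand_string above now fills all `size` positions instead of size - 1, so the terminating '\0' lands at index size and the caller has to provide at least size + 1 bytes. A self-contained sketch of that contract, with rand() standing in for the tool's rand_tinyint():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char charset[] =
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";

/* Fills str with `size` random characters plus a trailing '\0';
   the caller must therefore supply at least size + 1 bytes. */
static void fill_random(char *str, int size) {
    str[0] = '\0';
    if (size <= 0) return;
    for (int n = 0; n < size; n++) {
        int key = rand() % (int)(sizeof(charset) - 1);  /* -1 skips the '\0' */
        str[n] = charset[key];
    }
    str[size] = '\0';
}

int main(void) {
    char buf[17];            /* 16 characters + terminator */
    fill_random(buf, 16);
    printf("%s (%zu chars)\n", buf, strlen(buf));
    return 0;
}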
......@@ -1252,11 +1270,11 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
printf("top insert interval: \033[33m%"PRId64"\033[0m\n",
printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
printf("number of records per req: \033[33m%"PRId64"\033[0m\n",
printf("number of records per req: \033[33m%"PRIu64"\033[0m\n",
g_args.num_of_RPR);
printf("max sql length: \033[33m%"PRId64"\033[0m\n",
printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
......@@ -1318,10 +1336,10 @@ static int printfInsertMeta() {
}
}
printf(" super table count: \033[33m%"PRId64"\033[0m\n",
printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTblCount);
for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j);
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].sTblName);
......@@ -1342,7 +1360,7 @@ static int printfInsertMeta() {
printf(" childTblExists: \033[33m%s\033[0m\n", "error");
}
printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
printf(" childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblCount);
printf(" childTblPrefix: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
......@@ -1354,11 +1372,11 @@ static int printfInsertMeta() {
printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblLimit);
}
if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) {
printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n",
if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblOffset);
}
printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
printf(" insertRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertRows);
/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
......@@ -1367,11 +1385,11 @@ static int printfInsertMeta() {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
*/
printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n",
printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n",
printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
......@@ -1379,7 +1397,7 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].disorderRange);
printf(" disorderRatio: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].disorderRatio);
printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n",
printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].timeStampStep);
......@@ -1445,8 +1463,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR);
fprintf(fp, "max sql length: %"PRId64"\n", g_args.max_sql_len);
fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR);
fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
for (int i = 0; i < g_Dbs.dbCount; i++) {
......@@ -1503,7 +1521,7 @@ static void printfInsertMetaToFile(FILE* fp) {
}
}
fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount);
fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
fprintf(fp, " super table[%d]:\n", j);
......@@ -1525,7 +1543,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " childTblExists: %s\n", "error");
}
fprintf(fp, " childTblCount: %"PRId64"\n",
fprintf(fp, " childTblCount: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].childTblCount);
fprintf(fp, " childTblPrefix: %s\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
......@@ -1533,12 +1551,12 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs.db[i].superTbls[j].dataSource);
fprintf(fp, " insertMode: %s\n",
g_Dbs.db[i].superTbls[j].insertMode);
fprintf(fp, " insertRows: %"PRId64"\n",
fprintf(fp, " insertRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
fprintf(fp, " interlace rows: %"PRId64"\n",
fprintf(fp, " interlace rows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
fprintf(fp, " stable insert interval: %"PRId64"\n",
fprintf(fp, " stable insert interval: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
/*
......@@ -1548,11 +1566,11 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
*/
fprintf(fp, " interlaceRows: %"PRId64"\n",
fprintf(fp, " interlaceRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
fprintf(fp, " maxSqlLen: %"PRId64"\n",
fprintf(fp, " maxSqlLen: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
fprintf(fp, " timeStampStep: %"PRId64"\n",
......@@ -1613,21 +1631,21 @@ static void printfQueryMeta() {
printf("\n");
printf("specified table query info: \n");
printf("query interval: \033[33m%"PRId64" ms\033[0m\n",
printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryInterval);
printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times);
printf("concurrent: \033[33m%"PRId64"\033[0m\n",
printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.concurrent);
printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.sqlCount);
printf("specified tbl query times:\n");
printf(" \033[33m%"PRId64"\033[0m\n",
printf(" \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
printf("mod: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.mode);
printf("interval: \033[33m%"PRId64"\033[0m\n",
printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeInterval);
printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeRestart);
......@@ -1635,27 +1653,27 @@ static void printfQueryMeta() {
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
}
for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n",
for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
i, g_queryInfo.specifiedQueryInfo.sql[i]);
}
printf("\n");
printf("super table query info:\n");
printf("query interval: \033[33m%"PRId64"\033[0m\n",
printf("query interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryInterval);
printf("threadCnt: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.threadCnt);
printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
g_queryInfo.superQueryInfo.sTblName);
printf("stb query times:\033[33m%"PRId64"\033[0m\n",
printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
printf("mod: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.mode);
printf("interval: \033[33m%"PRId64"\033[0m\n",
printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.subscribeInterval);
printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.subscribeRestart);
......@@ -1663,7 +1681,7 @@ static void printfQueryMeta() {
g_queryInfo.superQueryInfo.subscribeKeepProgress);
}
printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.sqlCount);
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
printf(" sql[%d]: \033[33m%s\033[0m\n",
......@@ -2272,7 +2290,7 @@ static int calcRowLen(SSuperTable* superTbls) {
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) {
uint64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char limitBuf[100] = "\0";
......@@ -2283,7 +2301,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* childTblName = *childTblNameOfSuperTbl;
if (offset >= 0) {
snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"",
snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
limit, offset);
}
......@@ -2349,11 +2367,11 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
char* sTblName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl) {
uint64_t* childTblCountOfSuperTbl) {
return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
-1, -1);
-1, 0);
}
static int getSuperTableFromServer(TAOS * taos, char* dbName,
......@@ -2689,7 +2707,7 @@ static int createDatabasesAndStables() {
printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
}
debugPrint("%s() LN%d supertbl count:%"PRId64"\n",
debugPrint("%s() LN%d supertbl count:%"PRIu64"\n",
__func__, __LINE__, g_Dbs.db[i].superTblCount);
int validStbCount = 0;
......@@ -2748,15 +2766,15 @@ static void* createTable(void *sarg)
int len = 0;
int batchNum = 0;
verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n",
verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
__func__, __LINE__,
pThreadInfo->start_table_from, pThreadInfo->end_table_to);
for (int64_t i = pThreadInfo->start_table_from;
for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(buffer, buff_len,
"create table if not exists %s.%s%"PRId64" %s;",
"create table if not exists %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
......@@ -2787,7 +2805,7 @@ static void* createTable(void *sarg)
}
len += snprintf(buffer + len,
buff_len - len,
"if not exists %s.%s%"PRId64" using %s.%s tags %s ",
"if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
pThreadInfo->db_name, superTblInfo->childTblPrefix,
i, pThreadInfo->db_name,
superTblInfo->sTblName, tagsValBuf);
......@@ -2811,7 +2829,7 @@ static void* createTable(void *sarg)
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n",
printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
lastPrintTime = currentPrintTime;
}
......@@ -2879,7 +2897,7 @@ static int startMultiThreadCreateChildTable(
startFrom = t_info->end_table_to + 1;
t_info->use_metric = true;
t_info->cols = cols;
t_info->minDelay = INT64_MAX;
t_info->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, t_info);
}
......@@ -2945,7 +2963,7 @@ static void createChildTables() {
snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRIu64" schema: %s\n",
__func__, __LINE__,
g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
startMultiThreadCreateChildTable(
......@@ -3073,7 +3091,7 @@ static int readSampleFromCsvFileToMem(
}
if (readLen > superTblInfo->lenOfOneRow) {
printf("sample row len[%d] overflow define schema len[%"PRId64"], so discard this row\n",
printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
(int32_t)readLen, superTblInfo->lenOfOneRow);
continue;
}
......@@ -3316,7 +3334,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (threads2 && threads2->type == cJSON_Number) {
g_Dbs.threadCountByCreateTbl = threads2->valueint;
} else if (!threads2) {
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = 1;
} else {
errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
__func__, __LINE__);
......@@ -3325,6 +3343,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
if (gInsertInterval->valueint <0) {
errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
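This hunk and the following ones add the same validation to every numeric JSON field: a missing field falls back to a default, while a non-number or negative value aborts parsing. A compact sketch of that validate-or-default idiom, assuming the stock cJSON API (cJSON_GetObjectItem, cJSON_Number, valueint); read_uint_field is a hypothetical helper, not taosdemo code:

#include <stdint.h>
#include <stdio.h>
#include "cJSON.h"

/* Reads an optional non-negative numeric field; returns 0 on success. */
static int read_uint_field(cJSON *root, const char *name,
                           uint64_t *out, uint64_t default_value) {
    cJSON *item = cJSON_GetObjectItem(root, name);
    if (item == NULL) {                 /* absent: fall back to the default */
        *out = default_value;
        return 0;
    }
    if (item->type != cJSON_Number || item->valueint < 0) {
        fprintf(stderr, "failed to read json, %s input mistake\n", name);
        return -1;                      /* caller jumps to its PARSE_OVER */
    }
    *out = (uint64_t)item->valueint;
    return 0;
}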
......@@ -3336,13 +3359,19 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
if (interlaceRows->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_args.interlace_rows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
......@@ -3358,9 +3387,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len");
if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
if (maxSqlLen->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_args.max_sql_len = maxSqlLen->valueint;
} else if (!maxSqlLen) {
g_args.max_sql_len = 1024000;
g_args.max_sql_len = (1024*1024);
} else {
errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
__func__, __LINE__);
......@@ -3369,9 +3403,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req");
if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
if (numRecPerReq->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_args.num_of_RPR = numRecPerReq->valueint;
} else if (!numRecPerReq) {
g_args.num_of_RPR = INT64_MAX;
g_args.num_of_RPR = UINT64_MAX;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
__func__, __LINE__);
......@@ -3531,7 +3570,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (minRows && minRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
} else if (!minRows) {
g_Dbs.db[i].dbCfg.minRows = -1;
g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, minRows not found\n");
goto PARSE_OVER;
......@@ -3541,7 +3580,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (maxRows && maxRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
} else if (!maxRows) {
g_Dbs.db[i].dbCfg.maxRows = -1;
g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, maxRows not found\n");
goto PARSE_OVER;
......@@ -3686,7 +3725,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
errorPrint("%s() LN%d, failed to read json, childtable_count not found\n",
errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
......@@ -3840,12 +3879,17 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
*/
cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
if (interlaceRows->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
......@@ -3888,6 +3932,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
if (insertRows->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
......@@ -3900,8 +3949,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
if (insertInterval->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
} else if (!insertInterval) {
verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n",
verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
......@@ -3982,6 +4036,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
if (gQueryTimes->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
......@@ -4009,10 +4068,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
// super_table_query
// specified_table_query
cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query");
if (!specifiedQuery) {
g_queryInfo.specifiedQueryInfo.concurrent = 0;
g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, super_table_query not found\n");
......@@ -4028,6 +4087,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
"query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
if (specifiedQueryTimes->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
} else if (!specifiedQueryTimes) {
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
......@@ -4039,13 +4104,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
__func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
if (concurrent->valueint <= 0) {
errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
}
g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
} else if (!concurrent) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
}
......@@ -4149,7 +4215,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
// sub_table_query
cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
if (!superQuery) {
g_queryInfo.superQueryInfo.threadCnt = 0;
g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, sub_table_query not found\n");
......@@ -4165,6 +4231,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
if (superQueryTimes->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
......@@ -4176,6 +4247,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
if (threads->valueint <= 0) {
errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
} else if (!threads) {
g_queryInfo.superQueryInfo.threadCnt = 1;
......@@ -4215,10 +4292,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
}
cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval");
if (subinterval && subinterval->type == cJSON_Number) {
g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint;
} else if (!subinterval) {
cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
if (superInterval && superInterval->type == cJSON_Number) {
if (superInterval->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
} else if (!superInterval) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.subscribeInterval = 10000;
......@@ -4438,11 +4520,11 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
char *pstr = recBuf;
int64_t maxLen = MAX_DATA_SIZE;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp);
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp);
for (int i = 0; i < stbInfo->columnCount; i++) {
if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6))
|| (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) {
if ((0 == strncasecmp(stbInfo->columns[i].dataType, "BINARY", strlen("BINARY")))
|| (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", strlen("NCHAR")))) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
errorPrint( "binary or nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
......@@ -4455,47 +4537,47 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf);
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
tmfree(buf);
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"int", 3)) {
"INT", 3)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%d, ", rand_int());
"%d,", rand_int());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"bigint", 6)) {
"BIGINT", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%"PRId64", ", rand_bigint());
"%"PRId64",", rand_bigint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"float", 5)) {
"FLOAT", 5)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%f, ", rand_float());
"%f,", rand_float());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"double", 6)) {
"DOUBLE", 6)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%f, ", rand_double());
"%f,", rand_double());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"smallint", 8)) {
"SMALLINT", 8)) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%d, ", rand_smallint());
"%d,", rand_smallint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"tinyint", strlen("tinyint"))) {
"TINYINT", strlen("TINYINT"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%d, ", rand_tinyint());
"%d,", rand_tinyint());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"bool", strlen("bool"))) {
"BOOL", strlen("BOOL"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%d, ", rand_bool());
"%d,", rand_bool());
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"timestamp", strlen("timestamp"))) {
"TIMESTAMP", strlen("TIMESTAMP"))) {
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
"%"PRId64", ", rand_bigint());
"%"PRId64",", rand_bigint());
} else {
errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
return -1;
}
}
dataLen -= 2;
dataLen -= 1;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
......@@ -4522,31 +4604,31 @@ static int64_t generateData(char *recBuf, char **data_type,
}
for (int i = 0; i < c; i++) {
if (strcasecmp(data_type[i % c], "tinyint") == 0) {
pstr += sprintf(pstr, ", %d", rand_tinyint() );
} else if (strcasecmp(data_type[i % c], "smallint") == 0) {
pstr += sprintf(pstr, ", %d", rand_smallint());
} else if (strcasecmp(data_type[i % c], "int") == 0) {
pstr += sprintf(pstr, ", %d", rand_int());
} else if (strcasecmp(data_type[i % c], "bigint") == 0) {
pstr += sprintf(pstr, ", %" PRId64, rand_bigint());
} else if (strcasecmp(data_type[i % c], "float") == 0) {
pstr += sprintf(pstr, ", %10.4f", rand_float());
} else if (strcasecmp(data_type[i % c], "double") == 0) {
if (strcasecmp(data_type[i % c], "TINYINT") == 0) {
pstr += sprintf(pstr, ",%d", rand_tinyint() );
} else if (strcasecmp(data_type[i % c], "SMALLINT") == 0) {
pstr += sprintf(pstr, ",%d", rand_smallint());
} else if (strcasecmp(data_type[i % c], "INT") == 0) {
pstr += sprintf(pstr, ",%d", rand_int());
} else if (strcasecmp(data_type[i % c], "BIGINT") == 0) {
pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
} else if (strcasecmp(data_type[i % c], "FLOAT") == 0) {
pstr += sprintf(pstr, ",%10.4f", rand_float());
} else if (strcasecmp(data_type[i % c], "DOUBLE") == 0) {
double t = rand_double();
pstr += sprintf(pstr, ", %20.8f", t);
} else if (strcasecmp(data_type[i % c], "bool") == 0) {
pstr += sprintf(pstr, ",%20.8f", t);
} else if (strcasecmp(data_type[i % c], "BOOL") == 0) {
bool b = taosRandom() & 1;
pstr += sprintf(pstr, ", %s", b ? "true" : "false");
} else if (strcasecmp(data_type[i % c], "binary") == 0) {
pstr += sprintf(pstr, ",%s", b ? "true" : "false");
} else if (strcasecmp(data_type[i % c], "BINARY") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
pstr += sprintf(pstr, ", \"%s\"", s);
pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
} else if (strcasecmp(data_type[i % c], "nchar") == 0) {
} else if (strcasecmp(data_type[i % c], "NCHAR") == 0) {
char *s = malloc(lenOfBinary);
rand_string(s, lenOfBinary);
pstr += sprintf(pstr, ", \"%s\"", s);
pstr += sprintf(pstr, ",\"%s\"", s);
free(s);
}
......@@ -4569,7 +4651,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
sampleDataBuf = calloc(
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
if (sampleDataBuf == NULL) {
errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n",
errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
strerror(errno));
......@@ -4620,18 +4702,17 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
return affectedRows;
}
static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq)
static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) {
if ((superTblInfo->childTblOffset >= 0)
&& (superTblInfo->childTblLimit > 0)) {
if (superTblInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
superTblInfo->childTblName +
(tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n",
verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRIu64" seq=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, tableSeq);
......@@ -4639,31 +4720,31 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableS
superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"",
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
g_args.tb_prefix, tableSeq);
}
}
static int64_t generateDataTail(
SSuperTable* superTblInfo,
int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
uint64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) {
int64_t len = 0;
int ncols_per_record = 1; // count first col ts
uint64_t len = 0;
uint32_t ncols_per_record = 1; // count first col ts
char *pstr = buffer;
if (superTblInfo == NULL) {
int datatypeSeq = 0;
uint32_t datatypeSeq = 0;
while(g_args.datatype[datatypeSeq]) {
datatypeSeq ++;
ncols_per_record ++;
}
}
verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch);
verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
int64_t k = 0;
uint64_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
......@@ -4738,7 +4819,7 @@ static int64_t generateDataTail(
remainderBufLen -= retLen;
}
verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n",
verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
startFrom ++;
......@@ -4820,12 +4901,12 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
}
static int64_t generateInterlaceDataBuffer(
char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes,
int64_t tableSeq,
char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
uint64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
uint64_t insertRows,
int64_t startTime,
int64_t *pRemainderBufLen)
uint64_t *pRemainderBufLen)
{
assert(buffer);
char *pstr = buffer;
......@@ -4838,7 +4919,7 @@ static int64_t generateInterlaceDataBuffer(
return 0;
}
// generate data buffer
verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n",
verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n",
pThreadInfo->threadID, __func__, __LINE__, i, buffer);
pstr += headLen;
......@@ -4846,7 +4927,7 @@ static int64_t generateInterlaceDataBuffer(
int64_t dataLen = 0;
verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n",
verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
i, batchPerTblTimes, batchPerTbl);
......@@ -4868,7 +4949,7 @@ static int64_t generateInterlaceDataBuffer(
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRId64"\n",
debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
......@@ -4963,10 +5044,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
// TODO: prompt tbl count multple interlace rows and batch
//
int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n",
errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
......@@ -4978,18 +5059,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
int insert_interval =
uint64_t insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
int64_t st = 0;
int64_t et = 0xffffffff;
uint64_t st = 0;
uint64_t et = UINT64_MAX;
int64_t lastPrintTime = taosGetTimestampMs();
int64_t startTs = taosGetTimestampMs();
int64_t endTs;
uint64_t lastPrintTime = taosGetTimestampMs();
uint64_t startTs = taosGetTimestampMs();
uint64_t endTs;
int64_t tableSeq = pThreadInfo->start_table_from;
uint64_t tableSeq = pThreadInfo->start_table_from;
debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n",
debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRIu64" insertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
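The interlace writer above keeps its batch timestamps (st, et) and insert_interval as uint64_t milliseconds and only sleeps when a batch finishes ahead of the interval. A small sketch of that throttle, with now_ms() standing in for taosGetTimestampMs():

#define _POSIX_C_SOURCE 199309L
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Millisecond wall clock; stand-in for the tool's taosGetTimestampMs(). */
static uint64_t now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

int main(void) {
    uint64_t insert_interval = 50;            /* ms between batches */
    for (int batch = 0; batch < 3; batch++) {
        uint64_t st = now_ms();
        /* ... build and send one batch here ... */
        uint64_t et = now_ms();

        uint64_t elapsed = et - st;           /* et >= st, so no wrap-around */
        if (insert_interval > elapsed) {
            struct timespec pause = { 0, (long)(insert_interval - elapsed) * 1000000L };
            nanosleep(&pause, NULL);          /* wait out the rest of the interval */
        }
        printf("batch %d done after %" PRIu64 " ms of work\n", batch, elapsed);
    }
    return 0;
}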
......@@ -5021,7 +5102,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
// generate data
memset(buffer, 0, maxSqlLen);
int64_t remainderBufLen = maxSqlLen;
uint64_t remainderBufLen = maxSqlLen;
char *pstr = buffer;
......@@ -5040,7 +5121,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
return NULL;
}
int64_t oldRemainderLen = remainderBufLen;
uint64_t oldRemainderLen = remainderBufLen;
int64_t generated = generateInterlaceDataBuffer(
tableName, batchPerTbl, i, batchPerTblTimes,
tableSeq,
......@@ -5521,15 +5602,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int startFrom;
if (superTblInfo) {
int limit, offset;
int64_t limit;
uint64_t offset;
if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) &&
((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
&& (superTblInfo->childTblOffset >= 0)) {
if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
if ((superTblInfo->childTblLimit < 0)
|| ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
> (superTblInfo->childTblCount))) {
......@@ -5574,7 +5655,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
exit(-1);
}
int64_t childTblCount;
uint64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos,
db_name, superTblInfo->sTblName,
......@@ -5613,7 +5694,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
t_info->superTblInfo = superTblInfo;
t_info->start_time = start_time;
t_info->minDelay = INT64_MAX;
t_info->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) ||
(0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
......@@ -5658,7 +5739,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t totalDelay = 0;
int64_t maxDelay = 0;
int64_t minDelay = INT64_MAX;
int64_t minDelay = UINT64_MAX;
int64_t cntDelay = 1;
double avgDelay = 0;
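The aggregate delay statistics above use the same seed-with-extremes pattern as the per-thread fields: the minimum starts at the largest possible value and the maximum at zero, and each sample pulls them inward. A minimal sketch with consistently unsigned counters:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t delays[] = { 12, 7, 31, 9 };     /* per-request latencies in ms */
    uint64_t cntDelay = 0, totalDelay = 0;
    uint64_t maxDelay = 0;
    uint64_t minDelay = UINT64_MAX;           /* any real sample is smaller */

    for (size_t i = 0; i < sizeof(delays) / sizeof(delays[0]); i++) {
        uint64_t d = delays[i];
        totalDelay += d;
        cntDelay++;
        if (d > maxDelay) maxDelay = d;
        if (d < minDelay) minDelay = d;
    }
    double avgDelay = (double)totalDelay / (double)cntDelay;
    printf("min %" PRIu64 " max %" PRIu64 " avg %.2f ms\n", minDelay, maxDelay, avgDelay);
    return 0;
}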
......@@ -5762,11 +5843,11 @@ static void *readTable(void *sarg) {
printf("%d records:\n", totalData);
fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
for (int j = 0; j < n; j++) {
for (uint64_t j = 0; j < n; j++) {
double totalT = 0;
int count = 0;
for (int i = 0; i < num_of_tables; i++) {
sprintf(command, "select %s from %s%d where ts>= %" PRId64,
uint64_t count = 0;
for (uint64_t i = 0; i < num_of_tables; i++) {
sprintf(command, "select %s from %s%"PRIu64" where ts>= %" PRIu64,
aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
......@@ -6002,8 +6083,8 @@ static void *specifiedTableQuery(void *sarg) {
int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
int totalQueried = 0;
int64_t lastPrintTime = taosGetTimestampMs();
int64_t startTs = taosGetTimestampMs();
uint64_t lastPrintTime = taosGetTimestampMs();
uint64_t startTs = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
......@@ -6049,10 +6130,10 @@ static void *specifiedTableQuery(void *sarg) {
et = taosGetTimestampMs();
int64_t currentPrintTime = taosGetTimestampMs();
int64_t endTs = taosGetTimestampMs();
uint64_t currentPrintTime = taosGetTimestampMs();
uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n",
debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
__func__, __LINE__, endTs, startTs);
printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n",
pThreadInfo->threadID,
......@@ -6149,7 +6230,7 @@ static void *superTableQuery(void *sarg) {
}
}
et = taosGetTimestampMs();
printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n",
printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(),
pThreadInfo->start_table_from,
pThreadInfo->end_table_to,
......@@ -6415,7 +6496,7 @@ static void *superSubscribe(void *sarg) {
}
}
//et = taosGetTimestampMs();
//printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
//printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
} while(0);
// start loop to consume result
......@@ -6481,7 +6562,7 @@ static void *specifiedSubscribe(void *sarg) {
do {
//if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
// taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
// //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
// //printf("========sleep duration:%"PRIu64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
//}
//st = taosGetTimestampMs();
......@@ -6501,7 +6582,7 @@ static void *specifiedSubscribe(void *sarg) {
}
}
//et = taosGetTimestampMs();
//printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
//printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
} while(0);
// start loop to consume result
......@@ -6571,8 +6652,9 @@ static int subscribeTestProcess() {
//==== create sub threads for query from super table
if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) ||
(g_queryInfo.specifiedQueryInfo.concurrent <= 0)) {
errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
__func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
exit(-1);
}
......
......@@ -99,11 +99,13 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
#define COL_NOTE_LEN 128
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
char note[128];
char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
......@@ -523,7 +525,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
if (argc > 1)
if (argc > 2)
parse_args(argc, argv, &g_args);
argp_parse(&argp, argc, argv, 0, 0, &g_args);
......@@ -1188,16 +1190,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
case TSDB_DATA_TYPE_BINARY: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
char tbuf[COMMAND_SIZE];
converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
char tbuf[COL_NOTE_LEN];
converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
char tbuf[COMMAND_SIZE];
convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
......
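In the taosdump hunk above, the temporary conversion buffer shrinks from COMMAND_SIZE to COL_NOTE_LEN so the converted string cannot exceed what the note field can hold, with two bytes set aside for the wrapping single quotes. A minimal sketch of that sizing idea with a hypothetical helper (quote_note is not part of taosdump):

#include <stdio.h>
#include <string.h>

#define COL_NOTE_LEN 128

/* Hypothetical helper: wrap src in single quotes into note[COL_NOTE_LEN],
   truncating instead of overflowing when src is too long. */
static void quote_note(char *note, const char *src) {
    char body[COL_NOTE_LEN - 2];                 /* reserve 2 bytes for the quotes */
    snprintf(body, sizeof(body), "%s", src);     /* bounded copy of the payload */
    snprintf(note, COL_NOTE_LEN, "'%s'", body);  /* add quotes, still bounded */
}

int main(void) {
    char note[COL_NOTE_LEN];
    quote_note(note, "tag value read from a DESCRIBE row");
    printf("%s\n", note);
    return 0;
}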
......@@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
return NULL;
}
} else {
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 20);
if ( pRpc->pCache == NULL ) {
tError("%s failed to init connection cache", pRpc->label);
rpcClose(pRpc);
......
......@@ -17332,3 +17332,168 @@
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_and_cache_attr
fun:lib_getattr
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
fun:PyEval_EvalCode
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_and_cache_attr
fun:lib_getattr
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:PyEval_EvalCode
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:lib_getattr
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_PyObject_GC_New
fun:lib_getattr
fun:ffi_internal_new
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_cpython_func.isra.87
fun:lib_build_and_cache_attr
fun:lib_getattr
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:lib_build_and_cache_attr
fun:lib_getattr
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyFunction_Vectorcall
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
obj:/usr/bin/python3.8
fun:_PyEval_EvalFrameDefault
fun:_PyEval_EvalCodeWithName
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_my_Py_InitModule
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:PyInit__openssl
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: definite
fun:malloc
fun:_PyObject_GC_New
fun:ffi_internal_new
fun:b_init_cffi_1_0_external_module
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyObject_CallMethod
fun:_cffi_init
fun:PyInit__bcrypt
fun:_PyImport_LoadDynamicModuleWithSpec
obj:/usr/bin/python3.8
obj:/usr/bin/python3.8
fun:PyVectorcall_Call
}
\ No newline at end of file
......@@ -23,6 +23,7 @@ python3 ./test.py -f insert/insertIntoTwoTables.py
python3 ./test.py -f insert/before_1970.py
python3 bug2265.py
python3 ./test.py -f insert/bug3654.py
python3 ./test.py -f insert/insertDynamicColBeforeVal.py
#table
python3 ./test.py -f table/alter_wal0.py
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
"create table stb1 (ts timestamp, c11 int, c12 float ) TAGS(t11 int, t12 int )"
)
tdLog.printNoPrefix("==========step2:insert data with new syntax")
tdSql.execute(
"insert into t1 using stb1(t11, t12) tags(11, 12) (ts, c11, c12) values (now, 10, 20)"
)
# case for tag-value
tdSql.execute(
"insert into t2 using stb1(t11) tags(21) (ts, c11, c12) values (now-1m, 11, 21)"
)
tdSql.execute(
"insert into t3 using stb1 tags(31, 32) (ts, c11, c12) values (now-2m, 12, 22)"
)
tdSql.error(
"insert into t4 using stb1(t11, t12) (ts, c11, c12) values (now-3m, 13, 23)"
)
tdSql.error(
"insert into t5 using stb1(t11, t12) tags() (ts, c11, c12) values (now-4m, 14, 24)"
)
tdSql.error(
"insert into t6 using stb1(t11, t12) tags(41) (ts, c11, c12) values (now-5m, 15, 25)"
)
tdSql.error(
"insert into t7 using stb1(t12) tags(51, 52) (ts, c11, c12) values (now-6m, 16, 26)"
)
tdSql.execute(
"insert into t8 using stb1(t11, t12) tags('61', 62) (ts, c11, c12) values (now-7m, 17, 27)"
)
# case for col-value
tdSql.execute(
"insert into t9 using stb1(t11, t12) tags(71, 72) values (now-8m, 18, 28)"
)
tdSql.error(
"insert into t10 using stb1(t11, t12) tags(81, 82) (ts, c11, c12) values ()"
)
tdSql.error(
"insert into t11 using stb1(t11, t12) tags(91, 92) (ts, c11, c12) "
)
tdSql.error(
"insert into t12 using stb1(t11, t12) tags(101, 102) values (now-9m, 19)"
)
tdSql.error(
"insert into t13 using stb1(t11, t12) tags(111, 112) (ts, c11) values (now-10m, 110, 210)"
)
tdSql.error(
"insert into t14 using stb1(t11, t12) tags(121, 122) (ts, c11, c12) values (now-11m, 111)"
)
tdSql.execute(
"insert into t15 using stb1(t11, t12) tags(131, 132) (ts, c11, c12) values (now-12m, NULL , 212)"
)
tdSql.execute(
"insert into t16 using stb1(t11, t12) tags(141, 142) (ts, c11, c12) values (now-13m, 'NULL', 213)"
)
tdSql.error(
"insert into t17 using stb1(t11, t12) tags(151, 152) (ts, c11, c12) values (now-14m, Nan, 214)"
)
tdSql.error(
"insert into t18 using stb1(t11, t12) tags(161, 162) (ts, c11, c12) values (now-15m, 'NaN', 215)"
)
tdSql.execute(
"insert into t19 using stb1(t11, t12) tags(171, 172) (ts, c11) values (now-16m, 216)"
)
tdSql.error(
"insert into t20 using stb1(t11, t12) tags(181, 182) (c11, c12) values (117, 217)"
)
# multi-col_value
tdSql.execute(
"insert into t21 using stb1(t11, t12) tags(191, 192) (ts, c11, c12) values (now-17m, 118, 218)(now-18m, 119, 219)"
)
tdSql.execute(
"insert into t22 using stb1(t11, t12) tags(201, 202) values (now-19m, 120, 220)(now-19m, 121, 221)"
)
tdSql.error(
"insert into t23 using stb1(t11, t12) tags(211, 212) values (now-20m, 122, 222) (ts, c11, c12) values (now-21m, 123, 223)"
)
tdSql.error(
"insert into t24 using stb1(t11, t12) tags(221, 222) (ts, c11, c12) values (now-22m, 124, 224) (ts, c11, c12) values (now-23m, 125, 225)"
)
tdSql.execute(
"insert into t25 (ts, c11, c12) using stb1(t11, t12) tags(231, 232) values (now-24m, 126, 226)(now-25m, 127, 227)"
)
tdSql.error(
"insert into t26 (ts, c11, c12) values (now-24m, 128, 228)(now-25m, 129, 229) using stb1(t11, t12) tags(241, 242) "
)
tdSql.error(
"insert into t27 (ts, c11, c12) values (now-24m, 130, 230) using stb1(t11, t12) tags(251, 252) "
)
tdSql.query("show tables")
tdSql.checkRows(21)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
......@@ -136,6 +136,11 @@ class TDSql:
def checkData(self, row, col, data):
self.checkRowCol(row, col)
if self.queryResult[row][col] != data:
if self.cursor.istype(col, "TIMESTAMP") and self.queryResult[row][col] == datetime.datetime.fromisoformat(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data))
return
if str(self.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data))
......