Commit 63e5c214 authored by W wpan

Merge branch 'master' into hotfix/TD-5582

@@ -777,7 +777,7 @@ function is_version_compatible() {
     if [ -f ${script_dir}/driver/vercomp.txt ]; then
         min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
     else
-        min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+        min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 5)
     fi
     vercomp $curr_version $min_compatible_version
......
@@ -746,7 +746,7 @@ function is_version_compatible() {
     if [ -f ${script_dir}/driver/vercomp.txt ]; then
         min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
     else
-        min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+        min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
     fi
     vercomp $curr_version $min_compatible_version
......
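Both hunks above fix the same typo in the fallback path of is_version_compatible(): the minimum compatible version was being read from a nonexistent "tqd" binary instead of the packaged server binary (taosd in one install script, powerd in the other) before being handed to vercomp. For context, vercomp compares two dotted version strings; below is a minimal standalone sketch of that kind of comparison, written in C as a hypothetical helper, not the installer's actual shell function.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: compare two dotted version strings such as "2.0.20.0".
 * Returns <0, 0, >0 like strcmp. This only illustrates what a vercomp-style
 * check does; it is not the function used by the install scripts. */
static int version_cmp(const char *a, const char *b) {
    while (*a || *b) {
        char *enda, *endb;
        long x = strtol(a, &enda, 10);
        long y = strtol(b, &endb, 10);
        if (x != y) return (x > y) - (x < y);
        if (enda == a && endb == b) break;   /* no digits consumed: stop */
        a = (*enda == '.') ? enda + 1 : enda;
        b = (*endb == '.') ? endb + 1 : endb;
    }
    return 0;
}

int main(void) {
    const char *curr = "2.1.5.0";
    const char *min_compatible = "2.0.20.0";   /* example values only */
    if (version_cmp(curr, min_compatible) < 0) {
        printf("version %s is not compatible (requires >= %s)\n", curr, min_compatible);
        return 1;
    }
    printf("version %s is compatible\n", curr);
    return 0;
}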
@@ -41,10 +41,10 @@ fi
 if [ "$osType" != "Darwin" ]; then
     if [ "$pagMode" == "lite" ]; then
         #strip ${build_dir}/bin/taosd
         strip ${build_dir}/bin/taos
         bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
     else
         bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \
             ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
     fi
@@ -139,7 +139,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
     cp -r ${examples_dir}/C# ${install_dir}/examples
 fi
 # Copy driver
 mkdir -p ${install_dir}/driver
 cp ${lib_files} ${install_dir}/driver
 # Copy connector
@@ -168,7 +168,7 @@ fi
 # exit 1
 cd ${release_dir}
 # install_dir has been distinguishes cluster from edege, so comments this code
 pkg_name=${install_dir}-${osType}-${cpuType}
......
@@ -1964,6 +1964,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
     pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);
     tfree(pQueryInfo->fillVal);
+    pQueryInfo->fillType = 0;
     tfree(pQueryInfo->buf);
 }
......
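The single added line above resets fillType at the same point where fillVal is freed, so a recycled SQueryInfo cannot keep a fill mode that refers to already-freed fill values. A minimal sketch of this reset-on-free pattern, using hypothetical types rather than the real TDengine structs:

#include <stdlib.h>

/* Hypothetical query-info struct; the real SQueryInfo has many more fields. */
typedef struct {
    int   fillType;   /* e.g. none / previous / constant fill */
    void *fillVal;    /* values used when fillType requests constants */
    void *buf;
} QueryInfo;

/* Free owned buffers AND reset the flags that describe them, so a reused
 * object cannot be interpreted with settings that belonged to freed data. */
static void queryInfoReset(QueryInfo *q) {
    free(q->fillVal);
    q->fillVal  = NULL;
    q->fillType = 0;      /* mirrors the line added in the diff */
    free(q->buf);
    q->buf = NULL;
}

int main(void) {
    QueryInfo q = { .fillType = 2, .fillVal = malloc(16), .buf = malloc(64) };
    queryInfoReset(&q);
    return q.fillType;    /* 0 after reset */
}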
@@ -95,7 +95,6 @@ extern char configDir[];
 #define MAX_SUPER_TABLE_COUNT 200
 #define MAX_QUERY_SQL_COUNT 100
-#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE
 #define MAX_DATABASE_COUNT 256
 #define INPUT_BUF_LEN 256
@@ -382,7 +381,7 @@ typedef struct SpecifiedQueryInfo_S {
     uint64_t queryTimes;
     bool subscribeRestart;
     int subscribeKeepProgress;
-    char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
+    char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
     char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
     int resubAfterConsume[MAX_QUERY_SQL_COUNT];
     int endAfterConsume[MAX_QUERY_SQL_COUNT];
@@ -405,7 +404,7 @@ typedef struct SuperQueryInfo_S {
     int64_t childTblCount;
     char childTblPrefix[TSDB_TABLE_NAME_LEN - 20];    // 20 characters reserved for seq
     int sqlCount;
-    char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
+    char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
     char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
     int resubAfterConsume;
     int endAfterConsume;
@@ -1252,14 +1251,14 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
     // fetch the records row by row
     while((row = taos_fetch_row(res))) {
-        if (totalLen >= 100*1024*1024 - 32000) {
+        if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
             if (strlen(pThreadInfo->filePath) > 0)
                 appendResultBufToFile(databuf, pThreadInfo);
             totalLen = 0;
             memset(databuf, 0, 100*1024*1024);
         }
         num_rows++;
-        char temp[16000] = {0};
+        char temp[HEAD_BUFF_LEN] = {0};
         int len = taos_print_row(temp, row, fields, num_fields);
         len += sprintf(temp + len, "\n");
         //printf("query result:%s\n", temp);
@@ -2164,15 +2163,15 @@ static void printfDbInfoForQueryToFile(
 }
 static void printfQuerySystemInfo(TAOS * taos) {
-    char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
-    char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+    char filename[BUFFER_SIZE+1] = {0};
+    char buffer[BUFFER_SIZE+1] = {0};
     TAOS_RES* res;
     time_t t;
     struct tm* lt;
     time(&t);
     lt = localtime(&t);
-    snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d",
+    snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
             lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
             lt->tm_sec);
@@ -2204,12 +2203,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
         printfDbInfoForQueryToFile(filename, dbInfos[i], i);
         // show db.vgroups
-        snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+        snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
         res = taos_query(taos, buffer);
         xDumpResultToFile(filename, res);
         // show db.stables
-        snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+        snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
         res = taos_query(taos, buffer);
         xDumpResultToFile(filename, res);
         free(dbInfos[i]);
@@ -4529,7 +4528,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
             goto PARSE_OVER;
         }
         tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
-                sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+                sqlStr->valuestring, BUFFER_SIZE);
         // default value is -1, which mean infinite loop
         g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
@@ -4751,7 +4750,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
             goto PARSE_OVER;
         }
         tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
-                MAX_QUERY_SQL_LENGTH);
+                BUFFER_SIZE);
         cJSON *result = cJSON_GetObjectItem(sql, "result");
         if (result != NULL && result->type == cJSON_String
@@ -7404,14 +7403,14 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
     tstrncpy(outSql, inSql, pos - inSql + 1);
     //printf("1: %s\n", outSql);
-    strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1);
+    strncat(outSql, subTblName, BUFFER_SIZE - 1);
     //printf("2: %s\n", outSql);
-    strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1);
+    strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1);
     //printf("3: %s\n", outSql);
 }
 static void *superTableQuery(void *sarg) {
-    char sqlstr[MAX_QUERY_SQL_LENGTH];
+    char sqlstr[BUFFER_SIZE];
     threadInfo *pThreadInfo = (threadInfo *)sarg;
     setThreadName("superTableQuery");
@@ -7714,7 +7713,7 @@ static TAOS_SUB* subscribeImpl(
 static void *superSubscribe(void *sarg) {
     threadInfo *pThreadInfo = (threadInfo *)sarg;
-    char subSqlstr[MAX_QUERY_SQL_LENGTH];
+    char subSqlstr[BUFFER_SIZE];
     TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
     uint64_t tsubSeq;
......
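The taosdemo hunks above drop the MAX_QUERY_SQL_LENGTH alias (it was defined as BUFFER_SIZE anyway) and size every SQL buffer, snprintf bound, and strncat bound against BUFFER_SIZE directly, while the magic numbers 16000 and 32000 in fetchResult become HEAD_BUFF_LEN and HEAD_BUFF_LEN*2. One caveat worth remembering when consolidating bounds like this: strncat's size argument limits how many characters are appended, not the total size of the destination, so deriving the bound from the remaining room is safer. A minimal sketch in plain C, with a hypothetical buffer size and helper rather than the taosdemo code:

#include <stdio.h>
#include <string.h>

#define SQL_BUF_SIZE 1024   /* stand-in for a single BUFFER_SIZE-style bound */

/* Append src to dst without ever writing past dst[cap-1]; the bound is
 * computed from the remaining room in dst, not from a fixed constant. */
static void append_bounded(char *dst, size_t cap, const char *src) {
    size_t used = strlen(dst);
    if (used + 1 < cap) {
        strncat(dst, src, cap - used - 1);
    }
}

int main(void) {
    char sql[SQL_BUF_SIZE] = "select count(*) from ";
    append_bounded(sql, sizeof(sql), "db.stb_0");
    append_bounded(sql, sizeof(sql), " where ts > now - 1h;");
    printf("%s\n", sql);
    return 0;
}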
 def pre_test(){
+    sh '''
+    sudo rmtaos||echo 'no taosd installed'
+    '''
+    sh '''
+    cd ${WKC}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    git submodule update
+    cd ${WK}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    export TZ=Asia/Harbin
+    date
+    rm -rf ${WK}/debug
+    mkdir debug
+    cd debug
+    cmake .. > /dev/null
+    make > /dev/null
+    make install > /dev/null
+    pip3 install ${WKC}/src/connector/python
+    '''
+    return 1
+}
+def pre_test_p(){
     sh '''
     sudo rmtaos||echo 'no taosd installed'
     '''
@@ -39,7 +66,7 @@ pipeline {
     stage('pytest') {
       agent{label 'slad1'}
       steps {
-        pre_test()
+        pre_test_p()
         sh '''
         cd ${WKC}/tests
         find pytest -name '*'sql|xargs rm -rf
@@ -48,7 +75,7 @@ pipeline {
       }
     }
     stage('test_b1') {
-      agent{label 'master'}
+      agent{label 'slad2'}
       steps {
         pre_test()
@@ -62,7 +89,7 @@ pipeline {
     }
     stage('test_crash_gen') {
-      agent{label "slad2"}
+      agent{label "slad3"}
       steps {
         pre_test()
         sh '''
@@ -92,7 +119,7 @@ pipeline {
         }
         sh'''
-        systemctl start taosd
+        nohup taosd >/dev/null &
         sleep 10
         '''
         catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -110,16 +137,8 @@ pipeline {
         catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
           sh '''
           cd ${WKC}/tests/examples/JDBC/JDBCDemo/
-          mvn clean package assembly:single -DskipTests >/dev/null
-          java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
-          '''
-        }
-        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-          sh '''
-          cd ${WKC}/src/connector/jdbc
-          mvn clean package -Dmaven.test.skip=true >/dev/null
-          cd ${WKC}/tests/examples/JDBC/JDBCDemo/
-          java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
+          mvn clean package >/dev/null
+          java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar
           '''
         }
         catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -136,7 +155,7 @@ pipeline {
         '''
         }
         sh '''
-        systemctl stop taosd
+        pkill -9 taosd || echo 1
         cd ${WKC}/tests
         ./test-all.sh b2
         date
@@ -149,7 +168,7 @@ pipeline {
     }
     stage('test_valgrind') {
-      agent{label "slad3"}
+      agent{label "slad4"}
       steps {
         pre_test()
@@ -228,11 +247,11 @@ pipeline {
     }
   }
   post {
     success {
       emailext (
-        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
-        body: '''<!DOCTYPE html>
+        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
+        body: """<!DOCTYPE html>
         <html>
         <head>
         <meta charset="UTF-8">
@@ -248,29 +267,29 @@ pipeline {
         <td>
             <ul>
             <div style="font-size:18px">
-                <li>构建名称>>分支:${PROJECT_NAME}</li>
+                <li>构建名称>>分支:${env.BRANCH_NAME}</li>
                 <li>构建结果:<span style="color:green"> Successful </span></li>
                 <li>构建编号:${BUILD_NUMBER}</li>
-                <li>触发用户:${CAUSE}</li>
-                <li>变更概要:${CHANGES}</li>
+                <li>触发用户:${env.CHANGE_AUTHOR}</li>
+                <li>提交信息:${env.CHANGE_TITLE}</li>
                 <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
                 <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
-                <li>变更集:${JELLY_SCRIPT}</li>
             </div>
             </ul>
         </td>
         </tr>
         </table></font>
         </body>
-        </html>''',
+        </html>""",
         to: "yqliu@taosdata.com,pxiao@taosdata.com",
         from: "support@taosdata.com"
       )
     }
     failure {
       emailext (
-        subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
-        body: '''<!DOCTYPE html>
+        subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
+        body: """<!DOCTYPE html>
         <html>
         <head>
         <meta charset="UTF-8">
@@ -286,21 +305,21 @@ pipeline {
         <td>
             <ul>
             <div style="font-size:18px">
-                <li>构建名称>>分支:${PROJECT_NAME}</li>
-                <li>构建结果:<span style="color:green"> Successful </span></li>
+                <li>构建名称>>分支:${env.BRANCH_NAME}</li>
+                <li>构建结果:<span style="color:red"> Failure </span></li>
                 <li>构建编号:${BUILD_NUMBER}</li>
-                <li>触发用户:${CAUSE}</li>
-                <li>变更概要:${CHANGES}</li>
+                <li>触发用户:${env.CHANGE_AUTHOR}</li>
+                <li>提交信息:${env.CHANGE_TITLE}</li>
                 <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
                 <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
-                <li>变更集:${JELLY_SCRIPT}</li>
             </div>
             </ul>
         </td>
         </tr>
         </table></font>
         </body>
-        </html>''',
+        </html>""",
         to: "yqliu@taosdata.com,pxiao@taosdata.com",
         from: "support@taosdata.com"
       )
......
 def pre_test(){
+    sh '''
+    sudo rmtaos||echo 'no taosd installed'
+    '''
+    sh '''
+    cd ${WKC}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    git submodule update
+    cd ${WK}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    export TZ=Asia/Harbin
+    date
+    rm -rf ${WK}/debug
+    mkdir debug
+    cd debug
+    cmake .. > /dev/null
+    make > /dev/null
+    make install > /dev/null
+    pip3 install ${WKC}/src/connector/python/ || echo 0
+    '''
+    return 1
+}
+def pre_test_p(){
     sh '''
     sudo rmtaos||echo 'no taosd installed'
     '''
@@ -39,7 +66,7 @@ pipeline {
     stage('pytest') {
       agent{label 'slam1'}
       steps {
-        pre_test()
+        pre_test_p()
         sh '''
         cd ${WKC}/tests
         find pytest -name '*'sql|xargs rm -rf
@@ -92,7 +119,7 @@ pipeline {
         }
         sh'''
-        systemctl start taosd
+        nohup taosd >/dev/null &
         sleep 10
         '''
         catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -136,7 +163,7 @@ pipeline {
         '''
         }
         sh '''
-        systemctl stop taosd
+        pkill -9 taosd || echo 1
         cd ${WKC}/tests
         ./test-all.sh b2
         date
......
@@ -808,6 +808,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
           break;
         case TSDB_DATA_TYPE_BINARY:
         case TSDB_DATA_TYPE_NCHAR:
+          if (length[i] < 0 || length[i] > 1 << 20) {
+            fprintf(stderr, "Invalid length(%d) of BINARY or NCHAR\n", length[i]);
+            exit(-1);
+          }
           memset(value, 0, MAX_QUERY_VALUE_LEN);
           memcpy(value, row[i], length[i]);
           value[length[i]] = 0;
......
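The added guard above rejects implausible BINARY/NCHAR lengths before they reach memcpy, so a corrupted length field cannot overrun the fixed value buffer. A minimal sketch of the same validate-before-copy pattern, with hypothetical buffer sizes rather than the tsim constants:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VALUE_BUF_LEN 4096          /* stand-in for MAX_QUERY_VALUE_LEN */
#define MAX_FIELD_LEN (1 << 20)     /* sanity cap, as in the added check */

/* Copy a variable-length field into a fixed buffer only after validating
 * the reported length; reject negative or absurdly large values outright. */
static int copy_field(char *dst, size_t dst_len, const char *src, int32_t reported_len) {
    if (reported_len < 0 || reported_len > MAX_FIELD_LEN) {
        fprintf(stderr, "invalid field length: %d\n", reported_len);
        return -1;
    }
    size_t n = (size_t)reported_len;
    if (n >= dst_len) n = dst_len - 1;   /* also clamp to the destination */
    memset(dst, 0, dst_len);
    memcpy(dst, src, n);
    dst[n] = 0;
    return 0;
}

int main(void) {
    char value[VALUE_BUF_LEN];
    const char payload[] = "hello";
    if (copy_field(value, sizeof(value), payload, (int32_t)strlen(payload)) == 0) {
        printf("%s\n", value);
    }
    return 0;
}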