Unverified commit e00c9a02, authored by S Shengliang Guan, committed by GitHub

Merge pull request #9425 from taosdata/feature/dnode3

Feature/dnode3
......@@ -852,6 +852,18 @@ typedef struct {
int32_t dnodeId;
} SDropMnodeInMsg;
typedef struct {
int32_t dnodeId;
} SCreateQnodeInMsg, SDropQnodeInMsg;
typedef struct {
int32_t dnodeId;
} SCreateSnodeInMsg, SDropSnodeInMsg;
typedef struct {
int32_t dnodeId;
} SCreateBnodeInMsg, SDropBnodeInMsg;
typedef struct {
int32_t dnodeId;
int32_t vgId;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_BNODE_H_
#define _TD_BNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
/* ------------------------ TYPES EXPOSED ------------------------ */
typedef struct SDnode SDnode;
typedef struct SBnode SBnode;
typedef void (*SendMsgToDnodeFp)(SDnode *pDnode, struct SEpSet *epSet, struct SRpcMsg *rpcMsg);
typedef void (*SendMsgToMnodeFp)(SDnode *pDnode, struct SRpcMsg *rpcMsg);
typedef void (*SendRedirectMsgFp)(SDnode *pDnode, struct SRpcMsg *rpcMsg);
typedef struct {
int64_t numOfErrors;
} SBnodeLoad;
typedef struct {
int32_t sver;
} SBnodeCfg;
typedef struct {
int32_t dnodeId;
int64_t clusterId;
SBnodeCfg cfg;
SDnode *pDnode;
SendMsgToDnodeFp sendMsgToDnodeFp;
SendMsgToMnodeFp sendMsgToMnodeFp;
SendRedirectMsgFp sendRedirectMsgFp;
} SBnodeOpt;
/* ------------------------ SBnode ------------------------ */
/**
* @brief Start one Bnode in Dnode.
*
* @param pOption Option of the bnode.
* @return SBnode* The bnode object.
*/
SBnode *bndOpen(const SBnodeOpt *pOption);
/**
* @brief Stop Bnode in Dnode.
*
* @param pBnode The bnode object to close.
*/
void bndClose(SBnode *pBnode);
/**
* @brief Get the statistical information of Bnode
*
* @param pBnode The bnode object.
* @param pLoad Statistics of the bnode.
* @return int32_t 0 for success, -1 for failure.
*/
int32_t bndGetLoad(SBnode *pBnode, SBnodeLoad *pLoad);
/**
* @brief Process write messages in batch.
*
* @param pBnode The bnode object.
* @param pMsgs The array of SRpcMsg
* @return int32_t 0 for success, -1 for failure
*/
int32_t bndProcessWMsgs(SBnode *pBnode, SArray *pMsgs);
#ifdef __cplusplus
}
#endif
#endif /*_TD_BNODE_H_*/
\ No newline at end of file
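A minimal usage sketch of the bnode API declared above, for orientation only; the callbacks, ids, and batch contents are hypothetical placeholders, and the element type of the write batch is an assumption (the commit leaves it open).

#include "bnode.h"
#include "tarray.h"
#include "trpc.h"

/* Hypothetical callbacks a dnode would supply; bodies omitted. */
extern void demoSendToDnode(SDnode *pDnode, struct SEpSet *pEpSet, struct SRpcMsg *pMsg);
extern void demoSendToMnode(SDnode *pDnode, struct SRpcMsg *pMsg);
extern void demoSendRedirect(SDnode *pDnode, struct SRpcMsg *pMsg);

static void demoBnodeLifecycle(SDnode *pDnode) {
  SBnodeOpt opt = {0};
  opt.dnodeId = 1;                 /* hypothetical dnode id */
  opt.clusterId = 1;               /* hypothetical cluster id */
  opt.cfg.sver = 30000000;         /* 3.0.0.0 */
  opt.pDnode = pDnode;
  opt.sendMsgToDnodeFp = demoSendToDnode;
  opt.sendMsgToMnodeFp = demoSendToMnode;
  opt.sendRedirectMsgFp = demoSendRedirect;

  SBnode *pBnode = bndOpen(&opt);
  if (pBnode == NULL) return;

  /* Assumption: the write batch carries SRpcMsg items. */
  SArray *pMsgs = taosArrayInit(4, sizeof(SRpcMsg));
  if (pMsgs != NULL) {
    (void)bndProcessWMsgs(pBnode, pMsgs);  /* 0 for success, -1 for failure */
    taosArrayDestroy(pMsgs);
  }

  SBnodeLoad load = {0};
  (void)bndGetLoad(pBnode, &load);
  bndClose(pBnode);
}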
......@@ -29,6 +29,7 @@ typedef struct {
int32_t sver;
int16_t numOfCores;
int16_t numOfSupportVnodes;
int16_t numOfCommitThreads;
int8_t enableTelem;
int32_t statusInterval;
float numOfThreadsPerCore;
......
......@@ -19,52 +19,83 @@
#ifdef __cplusplus
extern "C" {
#endif
#include "trpc.h"
/* ------------------------ TYPES EXPOSED ------------------------ */
typedef struct SDnode SDnode;
typedef struct SQnode SQnode;
typedef void (*SendMsgToDnodeFp)(SDnode *pDnode, struct SEpSet *epSet, struct SRpcMsg *rpcMsg);
typedef void (*SendMsgToMnodeFp)(SDnode *pDnode, struct SRpcMsg *rpcMsg);
typedef void (*SendRedirectMsgFp)(SDnode *pDnode, struct SRpcMsg *rpcMsg);
typedef struct {
uint64_t numOfStartTask;
uint64_t numOfStopTask;
uint64_t numOfRecvedFetch;
uint64_t numOfSentHb;
uint64_t numOfSentFetch;
uint64_t numOfTaskInQueue;
uint64_t numOfFetchInQueue;
uint64_t numOfErrors;
} SQnodeStat;
int64_t numOfStartTask;
int64_t numOfStopTask;
int64_t numOfRecvedFetch;
int64_t numOfSentHb;
int64_t numOfSentFetch;
int64_t numOfTaskInQueue;
int64_t numOfFetchInQueue;
int64_t numOfErrors;
} SQnodeLoad;
typedef struct {
int32_t sver;
} SQnodeCfg;
typedef struct {
int32_t dnodeId;
int64_t clusterId;
SQnodeCfg cfg;
SDnode *pDnode;
SendMsgToDnodeFp sendMsgToDnodeFp;
SendMsgToMnodeFp sendMsgToMnodeFp;
SendRedirectMsgFp sendRedirectMsgFp;
} SQnodeOpt;
/* ------------------------ SQnode ------------------------ */
/**
* Start one Qnode in Dnode.
* @return Error Code.
* @brief Start one Qnode in Dnode.
*
* @param pOption Option of the qnode.
* @return SQnode* The qnode object.
*/
int32_t qnodeStart();
SQnode *qndOpen(const SQnodeOpt *pOption);
/**
* Stop Qnode in Dnode.
* @brief Stop Qnode in Dnode.
*
* @param qnodeId Qnode ID to stop, -1 for all Qnodes.
* @param pQnode The qnode object to close.
*/
void qnodeStop(int64_t qnodeId);
void qndClose(SQnode *pQnode);
/**
* Get the statistical information of Qnode
* @brief Get the statistical information of Qnode
*
* @param qnodeId Qnode ID to get statistics, -1 for all
* @param stat Statistical information.
* @return Error Code.
* @param pQnode The qnode object.
* @param pLoad Statistics of the qnode.
* @return int32_t 0 for success, -1 for failure.
*/
int32_t qnodeGetStatistics(int64_t qnodeId, SQnodeStat *stat);
int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad);
/**
* Interface for processing Qnode messages.
*
* @param pMsg Message to be processed.
* @return Error code
* @brief Process a query message.
*
* @param pQnode The qnode object.
* @param pMsg The request message
* @param pRsp The response message
* @return int32_t 0 for success, -1 for failure
*/
void qnodeProcessReq(SRpcMsg *pMsg);
int32_t qndProcessQueryReq(SQnode *pQnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
/**
* @brief Process a fetch message.
*
* @param pQnode The qnode object.
* @param pMsg The request message
* @param pRsp The response message
* @return int32_t 0 for success, -1 for failure
*/
int32_t qndProcessFetchReq(SQnode *pQnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
#ifdef __cplusplus
}
......
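A small sketch of how the SQnodeLoad counters above might be polled; the qnode handle is assumed to come from an earlier qndOpen() and the print format is illustrative only.

#include <inttypes.h>
#include <stdio.h>
#include "qnode.h"

static void demoPrintQnodeLoad(SQnode *pQnode) {
  SQnodeLoad load = {0};
  if (qndGetLoad(pQnode, &load) != 0) return;  /* 0 for success, -1 for failure */
  printf("qnode tasks started:%" PRId64 " stopped:%" PRId64 " fetch recv:%" PRId64 " errors:%" PRId64 "\n",
         load.numOfStartTask, load.numOfStopTask, load.numOfRecvedFetch, load.numOfErrors);
}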
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_SNODE_H_
#define _TD_SNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
/* ------------------------ TYPES EXPOSED ------------------------ */
typedef struct SDnode SDnode;
typedef struct SSnode SSnode;
typedef void (*SendMsgToDnodeFp)(SDnode *pDnode, struct SEpSet *epSet, struct SRpcMsg *rpcMsg);
typedef void (*SendMsgToMnodeFp)(SDnode *pDnode, struct SRpcMsg *rpcMsg);
typedef void (*SendRedirectMsgFp)(SDnode *pDnode, struct SRpcMsg *rpcMsg);
typedef struct {
int64_t numOfErrors;
} SSnodeLoad;
typedef struct {
int32_t sver;
} SSnodeCfg;
typedef struct {
int32_t dnodeId;
int64_t clusterId;
SSnodeCfg cfg;
SDnode *pDnode;
SendMsgToDnodeFp sendMsgToDnodeFp;
SendMsgToMnodeFp sendMsgToMnodeFp;
SendRedirectMsgFp sendRedirectMsgFp;
} SSnodeOpt;
/* ------------------------ SSnode ------------------------ */
/**
* @brief Start one Snode in Dnode.
*
* @param pOption Option of the snode.
* @return SSnode* The snode object.
*/
SSnode *sndOpen(const SSnodeOpt *pOption);
/**
* @brief Stop Snode in Dnode.
*
* @param pSnode The snode object to close.
*/
void sndClose(SSnode *pSnode);
/**
* @brief Get the statistical information of Snode
*
* @param pSnode The snode object.
* @param pLoad Statistics of the snode.
* @return int32_t 0 for success, -1 for failure.
*/
int32_t sndGetLoad(SSnode *pSnode, SSnodeLoad *pLoad);
/**
* @brief Process a write message.
*
* @param pSnode The snode object.
* @param pMsg The request message
* @param pRsp The response message
* @return int32_t 0 for success, -1 for failure
*/
int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp);
#ifdef __cplusplus
}
#endif
#endif /*_TD_SNODE_H_*/
\ No newline at end of file
......@@ -256,9 +256,27 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DND_MNODE_ID_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0423)
#define TSDB_CODE_DND_MNODE_READ_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0424)
#define TSDB_CODE_DND_MNODE_WRITE_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0425)
#define TSDB_CODE_DND_VNODE_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0430)
#define TSDB_CODE_DND_VNODE_READ_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0431)
#define TSDB_CODE_DND_VNODE_WRITE_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0432)
#define TSDB_CODE_DND_QNODE_ALREADY_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0430)
#define TSDB_CODE_DND_QNODE_NOT_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0431)
#define TSDB_CODE_DND_QNODE_ID_INVALID TAOS_DEF_ERROR_CODE(0, 0x0432)
#define TSDB_CODE_DND_QNODE_ID_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0433)
#define TSDB_CODE_DND_QNODE_READ_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0434)
#define TSDB_CODE_DND_QNODE_WRITE_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0435)
#define TSDB_CODE_DND_SNODE_ALREADY_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0440)
#define TSDB_CODE_DND_SNODE_NOT_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0441)
#define TSDB_CODE_DND_SNODE_ID_INVALID TAOS_DEF_ERROR_CODE(0, 0x0442)
#define TSDB_CODE_DND_SNODE_ID_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0443)
#define TSDB_CODE_DND_SNODE_READ_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0444)
#define TSDB_CODE_DND_SNODE_WRITE_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445)
#define TSDB_CODE_DND_BNODE_ALREADY_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0450)
#define TSDB_CODE_DND_BNODE_NOT_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0451)
#define TSDB_CODE_DND_BNODE_ID_INVALID TAOS_DEF_ERROR_CODE(0, 0x0452)
#define TSDB_CODE_DND_BNODE_ID_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0453)
#define TSDB_CODE_DND_BNODE_READ_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0454)
#define TSDB_CODE_DND_BNODE_WRITE_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0455)
#define TSDB_CODE_DND_VNODE_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0460)
#define TSDB_CODE_DND_VNODE_READ_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0461)
#define TSDB_CODE_DND_VNODE_WRITE_FILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0462)
// vnode
#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress")
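The renumbered dnode codes above are consumed through terrno, in the same way the qnode handlers later in this commit do; a minimal illustrative check (the surrounding handler is hypothetical):

/* Hypothetical caller; dndProcessCreateQnodeReq and dError come from this commit. */
if (dndProcessCreateQnodeReq(pDnode, pRpcMsg) != 0) {
  if (terrno == TSDB_CODE_DND_QNODE_ID_INVALID) {
    dError("create-qnode request targets a different dnode, code:0x%x", terrno);
  }
}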
......@@ -377,132 +395,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted")
#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit")
// http
#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin")
#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support")
#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format")
#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory")
#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big")
#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input")
#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty")
#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input")
#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd")
#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full")
#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error")
#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0")
#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip")
#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip")
#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login")
#define TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version")
#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length")
#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization")
#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization")
#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization")
#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization")
#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method")
#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target")
#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version")
#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp")
#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status")
#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase")
#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf")
#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header")
#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key")
#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val")
#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size")
#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk")
#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section")
#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state")
#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section")
#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0")
#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100")
#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error")
#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null")
#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long")
#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat")
#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0")
#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K")
#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find")
#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string")
#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0")
#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long")
#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find")
#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer")
#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0")
#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find")
#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0")
#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long")
#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null")
#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null")
#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long")
#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string")
#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null")
#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null")
#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long")
#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find")
#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0")
#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long")
#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null")
#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null")
#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long")
#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string")
#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null")
#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string")
#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist")
#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null")
#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long")
#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat")
#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0")
#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K")
#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find")
#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string")
#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0")
#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22")
#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find")
#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer")
#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0")
#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find")
#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0")
#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long")
#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null")
#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null")
#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long")
#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string")
#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null")
#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64")
#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find")
#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string")
#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error")
// odbc
#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory")
#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input")
#define TSDB_CODE_ODBC_CONV_UNDEF TAOS_DEF_ERROR_CODE(0, 0x2102) //"convertion undefined")
#define TSDB_CODE_ODBC_CONV_TRUNC_FRAC TAOS_DEF_ERROR_CODE(0, 0x2103) //"convertion fractional truncated")
#define TSDB_CODE_ODBC_CONV_TRUNC TAOS_DEF_ERROR_CODE(0, 0x2104) //"convertion truncated")
#define TSDB_CODE_ODBC_CONV_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2105) //"convertion not supported")
#define TSDB_CODE_ODBC_CONV_OOR TAOS_DEF_ERROR_CODE(0, 0x2106) //"convertion numeric value out of range")
#define TSDB_CODE_ODBC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x2107) //"out of range")
#define TSDB_CODE_ODBC_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2108) //"not supported yet")
#define TSDB_CODE_ODBC_INVALID_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2109) //"invalid handle")
#define TSDB_CODE_ODBC_NO_RESULT TAOS_DEF_ERROR_CODE(0, 0x210a) //"no result set")
#define TSDB_CODE_ODBC_NO_FIELDS TAOS_DEF_ERROR_CODE(0, 0x210b) //"no fields returned")
#define TSDB_CODE_ODBC_INVALID_CURSOR TAOS_DEF_ERROR_CODE(0, 0x210c) //"invalid cursor")
#define TSDB_CODE_ODBC_STATEMENT_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x210d) //"statement not ready")
#define TSDB_CODE_ODBC_CONNECTION_BUSY TAOS_DEF_ERROR_CODE(0, 0x210e) //"connection still busy")
#define TSDB_CODE_ODBC_BAD_CONNSTR TAOS_DEF_ERROR_CODE(0, 0x210f) //"bad connection string")
#define TSDB_CODE_ODBC_BAD_ARG TAOS_DEF_ERROR_CODE(0, 0x2110) //"bad argument")
#define TSDB_CODE_ODBC_CONV_NOT_VALID_TS TAOS_DEF_ERROR_CODE(0, 0x2111) //"not a valid timestamp")
#define TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x2112) //"src too large")
#define TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ TAOS_DEF_ERROR_CODE(0, 0x2113) //"src bad sequence")
#define TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE TAOS_DEF_ERROR_CODE(0, 0x2114) //"src incomplete")
#define TSDB_CODE_ODBC_CONV_SRC_GENERAL TAOS_DEF_ERROR_CODE(0, 0x2115) //"src general")
// tfs
#define TSDB_CODE_FS_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x2200) //"tfs out of memory")
#define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) //"tfs invalid mount config")
......
add_subdirectory(mnode)
add_subdirectory(vnode)
add_subdirectory(qnode)
add_subdirectory(snode)
add_subdirectory(bnode)
add_subdirectory(mgmt)
\ No newline at end of file
aux_source_directory(src BNODE_SRC)
add_library(bnode ${BNODE_SRC})
target_include_directories(
bnode
PUBLIC "${CMAKE_SOURCE_DIR}/include/dnode/bnode"
private "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
bnode
PRIVATE transport
PRIVATE os
PRIVATE common
PRIVATE util
)
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_BNODE_INT_H_
#define _TD_BNODE_INT_H_
#include "os.h"
#include "tarray.h"
#include "tlog.h"
#include "tmsg.h"
#include "trpc.h"
#include "bnode.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SBnode {
int32_t dnodeId;
int64_t clusterId;
SBnodeCfg cfg;
SendMsgToDnodeFp sendMsgToDnodeFp;
SendMsgToMnodeFp sendMsgToMnodeFp;
SendRedirectMsgFp sendRedirectMsgFp;
} SBnode;
#ifdef __cplusplus
}
#endif
#endif /*_TD_BNODE_INT_H_*/
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "bndInt.h"
SBnode *bndOpen(const SBnodeOpt *pOption) {
SBnode *pBnode = calloc(1, sizeof(SBnode));
return pBnode;
}
void bndClose(SBnode *pBnode) { free(pBnode); }
int32_t bndGetLoad(SBnode *pBnode, SBnodeLoad *pLoad) { return 0; }
int32_t bndProcessWMsgs(SBnode *pBnode, SArray *pMsgs) { return 0; }
......@@ -140,6 +140,7 @@ void dmnInitOption(SDnodeOpt *pOption) {
pOption->sver = 30000000; //3.0.0.0
pOption->numOfCores = tsNumOfCores;
pOption->numOfSupportVnodes = 1;
pOption->numOfCommitThreads = 1;
pOption->statusInterval = tsStatusInterval;
pOption->numOfThreadsPerCore = tsNumOfThreadsPerCore;
pOption->ratioOfQueryCores = tsRatioOfQueryCores;
......
......@@ -5,6 +5,9 @@ target_link_libraries(
PUBLIC cjson
PUBLIC mnode
PUBLIC vnode
PUBLIC qnode
PUBLIC snode
PUBLIC bnode
PUBLIC wal
PUBLIC sync
PUBLIC taos
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DND_BNODE_H_
#define _TD_DND_BNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dndInt.h"
int32_t dndInitBnode(SDnode *pDnode);
void dndCleanupBnode(SDnode *pDnode);
void dndProcessBnodeWriteMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet);
int32_t dndProcessCreateBnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg);
int32_t dndProcessDropBnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DND_BNODE_H_*/
\ No newline at end of file
......@@ -20,8 +20,11 @@
extern "C" {
#endif
#include "cJSON.h"
#include "os.h"
#include "cJSON.h"
#include "tcache.h"
#include "tcrc32c.h"
#include "tep.h"
#include "thash.h"
#include "tlockfree.h"
......@@ -34,7 +37,11 @@ extern "C" {
#include "tworker.h"
#include "dnode.h"
#include "bnode.h"
#include "mnode.h"
#include "qnode.h"
#include "snode.h"
#include "vnode.h"
extern int32_t dDebugFlag;
......@@ -52,7 +59,6 @@ typedef void (*DndMsgFp)(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEps);
typedef struct {
char *dnode;
char *mnode;
char *qnode;
char *snode;
char *bnode;
char *vnodes;
......@@ -93,6 +99,41 @@ typedef struct {
SWorkerPool syncPool;
} SMnodeMgmt;
typedef struct {
int32_t refCount;
int8_t deployed;
int8_t dropped;
char *file;
SQnode *pQnode;
SRWLatch latch;
taos_queue pQueryQ;
taos_queue pFetchQ;
SWorkerPool queryPool;
SWorkerPool fetchPool;
} SQnodeMgmt;
typedef struct {
int32_t refCount;
int8_t deployed;
int8_t dropped;
char *file;
SSnode *pSnode;
SRWLatch latch;
taos_queue pWriteQ;
SWorkerPool writePool;
} SSnodeMgmt;
typedef struct {
int32_t refCount;
int8_t deployed;
int8_t dropped;
char *file;
SBnode *pBnode;
SRWLatch latch;
taos_queue pWriteQ;
SMWorkerPool writePool;
} SBnodeMgmt;
typedef struct {
SHashObj *hash;
int32_t openVnodes;
......@@ -117,6 +158,9 @@ typedef struct SDnode {
FileFd lockFd;
SDnodeMgmt dmgmt;
SMnodeMgmt mmgmt;
SQnodeMgmt qmgmt;
SSnodeMgmt smgmt;
SBnodeMgmt bmgmt;
SVnodesMgmt vmgmt;
STransMgmt tmgmt;
SStartupMsg startup;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DND_QNODE_H_
#define _TD_DND_QNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dndInt.h"
int32_t dndInitQnode(SDnode *pDnode);
void dndCleanupQnode(SDnode *pDnode);
void dndProcessQnodeQueryMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet);
void dndProcessQnodeFetchMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet);
int32_t dndProcessCreateQnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg);
int32_t dndProcessDropQnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DND_QNODE_H_*/
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DND_SNODE_H_
#define _TD_DND_SNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dndInt.h"
int32_t dndInitSnode(SDnode *pDnode);
void dndCleanupSnode(SDnode *pDnode);
void dndProcessSnodeWriteMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet);
int32_t dndProcessCreateSnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg);
int32_t dndProcessDropSnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DND_SNODE_H_*/
\ No newline at end of file
......@@ -796,6 +796,7 @@ void dndCleanupMnode(SDnode *pDnode) {
if (pMgmt->pMnode) dndStopMnodeWorker(pDnode);
tfree(pMgmt->file);
mndClose(pMgmt->pMnode);
pMgmt->pMnode = NULL;
dInfo("dnode-mnode is cleaned up");
}
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "dndQnode.h"
#include "dndDnode.h"
#include "dndTransport.h"
static int32_t dndInitQnodeQueryWorker(SDnode *pDnode);
static int32_t dndInitQnodeFetchWorker(SDnode *pDnode);
static void dndCleanupQnodeQueryWorker(SDnode *pDnode);
static void dndCleanupQnodeFetchWorker(SDnode *pDnode);
static int32_t dndAllocQnodeQueryQueue(SDnode *pDnode);
static int32_t dndAllocQnodeFetchQueue(SDnode *pDnode);
static void dndFreeQnodeQueryQueue(SDnode *pDnode);
static void dndFreeQnodeFetchQueue(SDnode *pDnode);
static void dndProcessQnodeQueue(SDnode *pDnode, SRpcMsg *pMsg);
static int32_t dndWriteQnodeMsgToQueue(SQnode *pQnode, taos_queue pQueue, SRpcMsg *pRpcMsg);
static int32_t dndStartQnodeWorker(SDnode *pDnode);
static void dndStopQnodeWorker(SDnode *pDnode);
static SQnode *dndAcquireQnode(SDnode *pDnode);
static void dndReleaseQnode(SDnode *pDnode, SQnode *pQnode);
static int32_t dndReadQnodeFile(SDnode *pDnode);
static int32_t dndWriteQnodeFile(SDnode *pDnode);
static int32_t dndOpenQnode(SDnode *pDnode);
static int32_t dndDropQnode(SDnode *pDnode);
static SQnode *dndAcquireQnode(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SQnode *pQnode = NULL;
int32_t refCount = 0;
taosRLockLatch(&pMgmt->latch);
if (pMgmt->deployed && !pMgmt->dropped) {
refCount = atomic_add_fetch_32(&pMgmt->refCount, 1);
pQnode = pMgmt->pQnode;
} else {
terrno = TSDB_CODE_DND_QNODE_NOT_DEPLOYED;
}
taosRUnLockLatch(&pMgmt->latch);
if (pQnode != NULL) {
dTrace("acquire qnode, refCount:%d", refCount);
}
return pQnode;
}
static void dndReleaseQnode(SDnode *pDnode, SQnode *pQnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
int32_t refCount = 0;
taosRLockLatch(&pMgmt->latch);
if (pQnode != NULL) {
refCount = atomic_sub_fetch_32(&pMgmt->refCount, 1);
}
taosRUnLockLatch(&pMgmt->latch);
if (pQnode != NULL) {
dTrace("release qnode, refCount:%d", refCount);
}
}
static int32_t dndReadQnodeFile(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
int32_t code = TSDB_CODE_DND_QNODE_READ_FILE_ERROR;
int32_t len = 0;
int32_t maxLen = 4096;
char *content = calloc(1, maxLen + 1);
cJSON *root = NULL;
FILE *fp = fopen(pMgmt->file, "r");
if (fp == NULL) {
dDebug("file %s not exist", pMgmt->file);
code = 0;
goto PARSE_QNODE_OVER;
}
len = (int32_t)fread(content, 1, maxLen, fp);
if (len <= 0) {
dError("failed to read %s since content is null", pMgmt->file);
goto PARSE_QNODE_OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", pMgmt->file);
goto PARSE_QNODE_OVER;
}
cJSON *deployed = cJSON_GetObjectItem(root, "deployed");
if (!deployed || deployed->type != cJSON_Number) {
dError("failed to read %s since deployed not found", pMgmt->file);
goto PARSE_QNODE_OVER;
}
pMgmt->deployed = deployed->valueint;
cJSON *dropped = cJSON_GetObjectItem(root, "dropped");
if (!dropped || dropped->type != cJSON_Number) {
dError("failed to read %s since dropped not found", pMgmt->file);
goto PARSE_QNODE_OVER;
}
pMgmt->dropped = dropped->valueint;
code = 0;
dDebug("succcessed to read file %s, deployed:%d dropped:%d", pMgmt->file, pMgmt->deployed, pMgmt->dropped);
PARSE_QNODE_OVER:
if (content != NULL) free(content);
if (root != NULL) cJSON_Delete(root);
if (fp != NULL) fclose(fp);
terrno = code;
return code;
}
static int32_t dndWriteQnodeFile(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
char file[PATH_MAX + 20] = {0};
snprintf(file, sizeof(file), "%s.bak", pMgmt->file);
FILE *fp = fopen(file, "w");
if (fp == NULL) {
terrno = TSDB_CODE_DND_QNODE_WRITE_FILE_ERROR;
dError("failed to write %s since %s", file, terrstr());
return -1;
}
int32_t len = 0;
int32_t maxLen = 4096;
char *content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"deployed\": %d,\n", pMgmt->deployed);
len += snprintf(content + len, maxLen - len, " \"dropped\": %d\n", pMgmt->dropped);
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
taosFsyncFile(fileno(fp));
fclose(fp);
free(content);
if (taosRenameFile(file, pMgmt->file) != 0) {
terrno = TSDB_CODE_DND_QNODE_WRITE_FILE_ERROR;
dError("failed to rename %s since %s", pMgmt->file, terrstr());
return -1;
}
dInfo("successed to write %s, deployed:%d dropped:%d", pMgmt->file, pMgmt->deployed, pMgmt->dropped);
return 0;
}
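For reference, the snprintf sequence above emits a two-field JSON document; with deployed=1 and dropped=0 the resulting qnode.json is:

{
 "deployed": 1,
 "dropped": 0
}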
static int32_t dndStartQnodeWorker(SDnode *pDnode) {
if (dndInitQnodeQueryWorker(pDnode) != 0) {
dError("failed to start qnode query worker since %s", terrstr());
return -1;
}
if (dndInitQnodeFetchWorker(pDnode) != 0) {
dError("failed to start qnode fetch worker since %s", terrstr());
return -1;
}
if (dndAllocQnodeQueryQueue(pDnode) != 0) {
dError("failed to alloc qnode query queue since %s", terrstr());
return -1;
}
if (dndAllocQnodeFetchQueue(pDnode) != 0) {
dError("failed to alloc qnode fetch queue since %s", terrstr());
return -1;
}
return 0;
}
static void dndStopQnodeWorker(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
taosWLockLatch(&pMgmt->latch);
pMgmt->deployed = 0;
taosWUnLockLatch(&pMgmt->latch);
while (pMgmt->refCount > 1) taosMsleep(10);
while (!taosQueueEmpty(pMgmt->pQueryQ)) taosMsleep(10);
while (!taosQueueEmpty(pMgmt->pFetchQ)) taosMsleep(10);
dndCleanupQnodeQueryWorker(pDnode);
dndCleanupQnodeFetchWorker(pDnode);
dndFreeQnodeQueryQueue(pDnode);
dndFreeQnodeFetchQueue(pDnode);
}
static void dndBuildQnodeOption(SDnode *pDnode, SQnodeOpt *pOption) {
pOption->pDnode = pDnode;
pOption->sendMsgToDnodeFp = dndSendMsgToDnode;
pOption->sendMsgToMnodeFp = dndSendMsgToMnode;
pOption->sendRedirectMsgFp = dndSendRedirectMsg;
pOption->dnodeId = dndGetDnodeId(pDnode);
pOption->clusterId = dndGetClusterId(pDnode);
pOption->cfg.sver = pDnode->opt.sver;
}
static int32_t dndOpenQnode(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SQnodeOpt option = {0};
dndBuildQnodeOption(pDnode, &option);
SQnode *pQnode = qndOpen(&option);
if (pQnode == NULL) {
dError("failed to open qnode since %s", terrstr());
return -1;
}
pMgmt->deployed = 1;
int32_t code = dndWriteQnodeFile(pDnode);
if (code != 0) {
dError("failed to write qnode file since %s", terrstr());
code = terrno;
pMgmt->deployed = 0;
qndClose(pQnode);
// qndDestroy(pDnode->dir.qnode);
terrno = code;
return -1;
}
code = dndStartQnodeWorker(pDnode);
if (code != 0) {
dError("failed to start qnode worker since %s", terrstr());
code = terrno;
pMgmt->deployed = 0;
dndStopQnodeWorker(pDnode);
qndClose(pQnode);
// qndDestroy(pDnode->dir.qnode);
terrno = code;
return -1;
}
taosWLockLatch(&pMgmt->latch);
pMgmt->pQnode = pQnode;
pMgmt->deployed = 1;
taosWUnLockLatch(&pMgmt->latch);
dInfo("qnode open successfully");
return 0;
}
static int32_t dndDropQnode(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SQnode *pQnode = dndAcquireQnode(pDnode);
if (pQnode == NULL) {
dError("failed to drop qnode since %s", terrstr());
return -1;
}
taosRLockLatch(&pMgmt->latch);
pMgmt->dropped = 1;
taosRUnLockLatch(&pMgmt->latch);
if (dndWriteQnodeFile(pDnode) != 0) {
taosRLockLatch(&pMgmt->latch);
pMgmt->dropped = 0;
taosRUnLockLatch(&pMgmt->latch);
dndReleaseQnode(pDnode, pQnode);
dError("failed to drop qnode since %s", terrstr());
return -1;
}
dndReleaseQnode(pDnode, pQnode);
dndStopQnodeWorker(pDnode);
dndWriteQnodeFile(pDnode);
qndClose(pQnode);
pMgmt->pQnode = NULL;
// qndDestroy(pDnode->dir.qnode);
return 0;
}
int32_t dndProcessCreateQnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg) {
SCreateQnodeInMsg *pMsg = pRpcMsg->pCont;
pMsg->dnodeId = htonl(pMsg->dnodeId);
if (pMsg->dnodeId != dndGetDnodeId(pDnode)) {
terrno = TSDB_CODE_DND_QNODE_ID_INVALID;
return -1;
} else {
return dndOpenQnode(pDnode);
}
}
int32_t dndProcessDropQnodeReq(SDnode *pDnode, SRpcMsg *pRpcMsg) {
SDropQnodeInMsg *pMsg = pRpcMsg->pCont;
pMsg->dnodeId = htonl(pMsg->dnodeId);
if (pMsg->dnodeId != dndGetDnodeId(pDnode)) {
terrno = TSDB_CODE_DND_QNODE_ID_INVALID;
return -1;
} else {
return dndDropQnode(pDnode);
}
}
static void dndProcessQnodeQueue(SDnode *pDnode, SRpcMsg *pMsg) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SRpcMsg *pRsp = NULL;
int32_t code = 0;
SQnode *pQnode = dndAcquireQnode(pDnode);
if (pQnode == NULL) {
code = -1;
} else {
code = qndProcessQueryReq(pQnode, pMsg, &pRsp);
}
if (pRsp != NULL) {
pRsp->ahandle = pMsg->ahandle;
rpcSendResponse(pRsp);
free(pRsp);
} else {
if (code != 0) code = terrno;
SRpcMsg rpcRsp = {.handle = pMsg->handle, .ahandle = pMsg->ahandle, .code = code};
rpcSendResponse(&rpcRsp);
}
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
}
static int32_t dndWriteQnodeMsgToQueue(SQnode *pQnode, taos_queue pQueue, SRpcMsg *pRpcMsg) {
int32_t code = 0;
if (pQnode == NULL || pQueue == NULL) {
code = TSDB_CODE_DND_QNODE_NOT_DEPLOYED;
} else {
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg));
if (pMsg == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
*pMsg = *pRpcMsg;
if (taosWriteQitem(pQueue, pMsg) != 0) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
}
}
if (code != 0) {
if (pRpcMsg->msgType & 1u) {
SRpcMsg rsp = {.handle = pRpcMsg->handle, .code = code};
rpcSendResponse(&rsp);
}
rpcFreeCont(pRpcMsg->pCont);
}
return code;
}
void dndProcessQnodeQueryMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SQnode *pQnode = dndAcquireQnode(pDnode);
dndWriteQnodeMsgToQueue(pQnode, pMgmt->pQueryQ, pMsg);
dndReleaseQnode(pDnode, pQnode);
}
void dndProcessQnodeFetchMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SQnode *pQnode = dndAcquireQnode(pDnode);
dndWriteQnodeMsgToQueue(pQnode, pMgmt->pFetchQ, pMsg);
dndReleaseQnode(pDnode, pQnode);
}
static int32_t dndAllocQnodeQueryQueue(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
pMgmt->pQueryQ = tWorkerAllocQueue(&pMgmt->queryPool, pDnode, (FProcessItem)dndProcessQnodeQueue);
if (pMgmt->pQueryQ == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
return 0;
}
static void dndFreeQnodeQueryQueue(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
tWorkerFreeQueue(&pMgmt->queryPool, pMgmt->pQueryQ);
pMgmt->pQueryQ = NULL;
}
static int32_t dndInitQnodeQueryWorker(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SWorkerPool *pPool = &pMgmt->queryPool;
pPool->name = "qnode-query";
pPool->min = 0;
pPool->max = 1;
if (tWorkerInit(pPool) != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
dDebug("qnode query worker is initialized");
return 0;
}
static void dndCleanupQnodeQueryWorker(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
tWorkerCleanup(&pMgmt->queryPool);
dDebug("qnode query worker is closed");
}
static int32_t dndAllocQnodeFetchQueue(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
pMgmt->pFetchQ = tWorkerAllocQueue(&pMgmt->fetchPool, pDnode, (FProcessItem)dndProcessQnodeQueue);
if (pMgmt->pFetchQ == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
return 0;
}
static void dndFreeQnodeFetchQueue(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
tWorkerFreeQueue(&pMgmt->fetchPool, pMgmt->pFetchQ);
pMgmt->pFetchQ = NULL;
}
static int32_t dndInitQnodeFetchWorker(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
SWorkerPool *pPool = &pMgmt->fetchPool;
pPool->name = "qnode-fetch";
pPool->min = 0;
pPool->max = 1;
if (tWorkerInit(pPool) != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
dDebug("qnode fetch worker is initialized");
return 0;
}
static void dndCleanupQnodeFetchWorker(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
tWorkerCleanup(&pMgmt->fetchPool);
dDebug("qnode fetch worker is closed");
}
int32_t dndInitQnode(SDnode *pDnode) {
dInfo("dnode-qnode start to init");
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
taosInitRWLatch(&pMgmt->latch);
char path[PATH_MAX];
snprintf(path, PATH_MAX, "%s/qnode.json", pDnode->dir.dnode);
pMgmt->file = strdup(path);
if (pMgmt->file == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (dndReadQnodeFile(pDnode) != 0) {
return -1;
}
if (pMgmt->dropped) return 0;
if (!pMgmt->deployed) return 0;
return dndOpenQnode(pDnode);
}
void dndCleanupQnode(SDnode *pDnode) {
SQnodeMgmt *pMgmt = &pDnode->qmgmt;
dInfo("dnode-qnode start to clean up");
if (pMgmt->pQnode) dndStopQnodeWorker(pDnode);
tfree(pMgmt->file);
qndClose(pMgmt->pQnode);
pMgmt->pQnode = NULL;
dInfo("dnode-qnode is cleaned up");
}
......@@ -19,8 +19,6 @@
#include "dndTransport.h"
#include "dndVnodes.h"
#include "sync.h"
#include "tcache.h"
#include "tcrc32c.h"
#include "wal.h"
EStat dndGetStat(SDnode *pDnode) { return pDnode->stat; }
......@@ -86,12 +84,14 @@ static int32_t dndInitEnv(SDnode *pDnode, SDnodeOpt *pOption) {
char path[PATH_MAX + 100];
snprintf(path, sizeof(path), "%s%smnode", pOption->dataDir, TD_DIRSEP);
pDnode->dir.mnode = tstrdup(path);
snprintf(path, sizeof(path), "%s%svnode", pOption->dataDir, TD_DIRSEP);
pDnode->dir.vnodes = tstrdup(path);
snprintf(path, sizeof(path), "%s%sdnode", pOption->dataDir, TD_DIRSEP);
pDnode->dir.dnode = tstrdup(path);
snprintf(path, sizeof(path), "%s%ssnode", pOption->dataDir, TD_DIRSEP);
pDnode->dir.snode = tstrdup(path);
snprintf(path, sizeof(path), "%s%sbnode", pOption->dataDir, TD_DIRSEP);
pDnode->dir.bnode = tstrdup(path);
if (pDnode->dir.mnode == NULL || pDnode->dir.vnodes == NULL || pDnode->dir.dnode == NULL) {
dError("failed to malloc dir object");
......@@ -117,22 +117,28 @@ static int32_t dndInitEnv(SDnode *pDnode, SDnodeOpt *pOption) {
return -1;
}
if (taosMkDir(pDnode->dir.snode) != 0) {
dError("failed to create dir:%s since %s", pDnode->dir.snode, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
if (taosMkDir(pDnode->dir.bnode) != 0) {
dError("failed to create dir:%s since %s", pDnode->dir.bnode, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
memcpy(&pDnode->opt, pOption, sizeof(SDnodeOpt));
return 0;
}
static void dndCleanupEnv(SDnode *pDnode) {
if (pDnode->dir.mnode != NULL) {
tfree(pDnode->dir.mnode);
}
if (pDnode->dir.vnodes != NULL) {
tfree(pDnode->dir.vnodes);
}
if (pDnode->dir.dnode != NULL) {
tfree(pDnode->dir.dnode);
}
tfree(pDnode->dir.mnode);
tfree(pDnode->dir.vnodes);
tfree(pDnode->dir.dnode);
tfree(pDnode->dir.snode);
tfree(pDnode->dir.bnode);
if (pDnode->lockFd >= 0) {
taosUnLockFile(pDnode->lockFd);
......@@ -176,7 +182,7 @@ SDnode *dndInit(SDnodeOpt *pOption) {
return NULL;
}
if (vnodeInit(1) != 0) {
if (vnodeInit(pDnode->opt.numOfCommitThreads) != 0) {
dError("failed to init vnode env");
dndCleanup(pDnode);
return NULL;
......
......@@ -27,6 +27,7 @@ SDnodeOpt TestServer::BuildOption(const char* path, const char* fqdn, uint16_t p
option.sver = 1;
option.numOfCores = 1;
option.numOfSupportVnodes = 1;
option.numOfCommitThreads = 1;
option.statusInterval = 1;
option.numOfThreadsPerCore = 1;
option.ratioOfQueryCores = 1;
......
......@@ -4,4 +4,11 @@ target_include_directories(
qnode
PUBLIC "${CMAKE_SOURCE_DIR}/include/dnode/qnode"
private "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
qnode
PRIVATE transport
PRIVATE os
PRIVATE common
PRIVATE util
)
\ No newline at end of file
......@@ -16,10 +16,27 @@
#ifndef _TD_QNODE_INT_H_
#define _TD_QNODE_INT_H_
#include "os.h"
#include "tlog.h"
#include "tmsg.h"
#include "trpc.h"
#include "qnode.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SQnode {
int32_t dnodeId;
int64_t clusterId;
SQnodeCfg cfg;
SendMsgToDnodeFp sendMsgToDnodeFp;
SendMsgToMnodeFp sendMsgToMnodeFp;
SendRedirectMsgFp sendRedirectMsgFp;
} SQnode;
#ifdef __cplusplus
}
#endif
......
......@@ -11,4 +11,25 @@
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
\ No newline at end of file
*/
#include "qndInt.h"
SQnode *qndOpen(const SQnodeOpt *pOption) {
SQnode *pQnode = calloc(1, sizeof(SQnode));
return pQnode;
}
void qndClose(SQnode *pQnode) { free(pQnode); }
int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; }
int32_t qndProcessQueryReq(SQnode *pQnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
*pRsp = NULL;
return 0;
}
int32_t qndProcessFetchReq(SQnode *pQnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
*pRsp = NULL;
return 0;
}
aux_source_directory(src SNODE_SRC)
add_library(snode ${SNODE_SRC})
target_include_directories(
snode
PUBLIC "${CMAKE_SOURCE_DIR}/include/dnode/snode"
private "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
snode
PRIVATE transport
PRIVATE os
PRIVATE common
PRIVATE util
)
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_SNODE_INT_H_
#define _TD_SNODE_INT_H_
#include "os.h"
#include "tlog.h"
#include "tmsg.h"
#include "trpc.h"
#include "snode.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SSnode {
int32_t dnodeId;
int64_t clusterId;
SSnodeCfg cfg;
SendMsgToDnodeFp sendMsgToDnodeFp;
SendMsgToMnodeFp sendMsgToMnodeFp;
SendRedirectMsgFp sendRedirectMsgFp;
} SSnode;
#ifdef __cplusplus
}
#endif
#endif /*_TD_SNODE_INT_H_*/
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "sndInt.h"
SSnode *sndOpen(const SSnodeOpt *pOption) {
SSnode *pSnode = calloc(1, sizeof(SSnode));
return pSnode;
}
void sndClose(SSnode *pSnode) { free(pSnode); }
int32_t sndGetLoad(SSnode *pSnode, SSnodeLoad *pLoad) { return 0; }
int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
*pRsp = NULL;
return 0;
}
......@@ -256,6 +256,24 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_MNODE_ID_INVALID, "Mnode Id invalid")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_MNODE_ID_NOT_FOUND, "Mnode Id not found")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_MNODE_READ_FILE_ERROR, "Read mnode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_MNODE_WRITE_FILE_ERROR, "Write mnode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_QNODE_ALREADY_DEPLOYED, "Qnode already deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_QNODE_NOT_DEPLOYED, "Qnode not deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_QNODE_ID_INVALID, "Qnode Id invalid")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_QNODE_ID_NOT_FOUND, "Qnode Id not found")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_QNODE_READ_FILE_ERROR, "Read qnode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_QNODE_WRITE_FILE_ERROR, "Write qnode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_SNODE_ALREADY_DEPLOYED, "Snode already deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_SNODE_NOT_DEPLOYED, "Snode not deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_SNODE_ID_INVALID, "Snode Id invalid")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_SNODE_ID_NOT_FOUND, "Snode Id not found")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_SNODE_READ_FILE_ERROR, "Read snode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_SNODE_WRITE_FILE_ERROR, "Write snode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_BNODE_ALREADY_DEPLOYED, "Bnode already deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_BNODE_NOT_DEPLOYED, "Bnode not deployed")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_BNODE_ID_INVALID, "Bnode Id invalid")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_BNODE_ID_NOT_FOUND, "Bnode Id not found")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_BNODE_READ_FILE_ERROR, "Read bnode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_BNODE_WRITE_FILE_ERROR, "Write bnode.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_VNODE_TOO_MANY_VNODES, "Too many vnode directories")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_VNODE_READ_FILE_ERROR, "Read vnodes.json error")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_VNODE_WRITE_FILE_ERROR, "Write vnodes.json error")
......@@ -360,132 +378,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic er
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit")
// http
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_UNSUPPORT_URL, "url is not support")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_URL, "invalid url format")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_ENOUGH_MEMORY, "no enough memory")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUSET_TOO_BIG, "request size is too big")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_AUTH_INFO, "no auth info input")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_MSG_INPUT, "request is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_SQL_INPUT, "no sql input")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_NO_EXEC_USEDB, "no need to execute use db cmd")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SESSION_FULL, "session list was full")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR, "generate taosd token error")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_MULTI_REQUEST, "size of multi request is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_CREATE_GZIP_FAILED, "failed to create gzip")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_FINISH_GZIP_FAILED, "failed to finish gzip")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_LOGIN_FAILED, "failed to login")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_VERSION, "invalid http version")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH, "invalid content length")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_TYPE, "invalid type of Authorization")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_AUTH_FORMAT, "invalid format of Authorization")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_BASIC_AUTH, "invalid basic Authorization")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_INVALID_TAOSD_AUTH, "invalid taosd Authorization")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_METHOD_FAILED, "failed to parse method")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_TARGET_FAILED, "failed to parse target")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_VERSION_FAILED, "failed to parse http version")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_SP_FAILED, "failed to parse sp")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_STATUS_FAILED, "failed to parse status")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_PHRASE_FAILED, "failed to parse phrase")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CRLF_FAILED, "failed to parse crlf")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_FAILED, "failed to parse header")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED, "failed to parse header key")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED, "failed to parse header val")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED, "failed to parse chunk size")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_CHUNK_FAILED, "failed to parse chunk")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_END_FAILED, "failed to parse end section")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_INVALID_STATE, "invalid parse state")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_PARSE_ERROR_STATE, "failed to parse error section")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_NULL, "query size is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_QUERY_SIZE, "query size can not more than 100")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR, "parse grafana json error")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_NOT_INPUT, "database name can not be null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_DB_TOO_LONG, "database name too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_INVALID_JSON, "invalid telegraf json fromat")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_NULL, "metrics size is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRICS_SIZE, "metrics size can not more than 1K")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NULL, "metric name not find")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_TYPE, "metric name type should be string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_NULL, "metric name length is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_METRIC_NAME_LONG, "metric name length too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_NULL, "timestamp not find")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE, "timestamp type should be integer")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL, "timestamp value smaller than 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_NULL, "tags not find")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_0, "tags size is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG, "tags size too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NULL, "tag is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_NULL, "tag name is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_NAME_SIZE, "tag name length too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE, "tag value type should be number or string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TAG_VALUE_NULL, "tag value is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_NULL, "table is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_TABLE_SIZE, "table name length too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_NULL, "fields not find")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_0, "fields size is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG, "fields size too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NULL, "field is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_NULL, "field name is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE, "field name length too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE, "field value type should be number or string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL, "field value is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_HOST_NOT_STRING, "host type should be string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST, "stable does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_NOT_INPUT, "database name can not be null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_DB_TOO_LONG, "database name too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_INVALID_JSON, "invalid opentsdb json format")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_NULL, "metrics size is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRICS_SIZE, "metrics size can not be more than 10K")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NULL, "metric name not found")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_TYPE, "metric name type should be string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_NULL, "metric name length is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_METRIC_NAME_LONG, "metric name length can not be more than 22")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_NULL, "timestamp not found")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE, "timestamp type should be integer")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL, "timestamp value smaller than 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_NULL, "tags not found")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_0, "tags size is 0")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG, "tags size too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NULL, "tag is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_NULL, "tag name is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_NAME_SIZE, "tag name length too long")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE, "tag value type should be boolean, number or string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_NULL, "tag value is null")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, "tag value can not be longer than 64")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, "value not found")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, "value type should be boolean, number or string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUEST_JSON_ERROR, "http request json error")
// odbc
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, "out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, "conversion not a valid literal input")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_UNDEF, "conversion undefined")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC_FRAC, "conversion fractional truncated")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_TRUNC, "conversion truncated")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_SUPPORT, "conversion not supported")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_OOR, "conversion numeric value out of range")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OUT_OF_RANGE, "out of range")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NOT_SUPPORT, "not supported yet")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_HANDLE, "invalid handle")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_RESULT, "no result set")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_NO_FIELDS, "no fields returned")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_INVALID_CURSOR, "invalid cursor")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_STATEMENT_NOT_READY, "statement not ready")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONNECTION_BUSY, "connection still busy")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_CONNSTR, "bad connection string")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_BAD_ARG, "bad argument")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_NOT_VALID_TS, "not a valid timestamp")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE, "src too large")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ, "src bad sequence")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE, "src incomplete")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_GENERAL, "src general")
// tfs
TAOS_DEFINE_ERROR(TSDB_CODE_FS_OUT_OF_MEMORY, "tfs out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_CFG, "tfs invalid mount config")
......
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c httpEnableRecordSql -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database db
sql use db
sql create table if not exists db.win_cpu(ts timestamp,f_percent_dpc_time double,f_percent_idle_time double,f_percent_interrupt_time double,f_percent_privileged_time double,f_percent_processor_time double,f_percent_user_time double) tags(t_host binary(32),t_instance binary(32),t_objectname binary(32));
print =============== step2 - auto create
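# the request below relies on REST auto table creation: "import into ... using db.win_cpu tags(...)" creates the sub-table on first write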
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'import into db.win_cpu_windows_1_processor using db.win_cpu tags('windows','1','Processor') values(1564641722000,0.000000,95.598305,0.000000,0.000000,0.000000,0.000000);' 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
# return -1
#endi
sql select * from db.win_cpu_windows_1_processor
print rows: $rows
if $rows != 1 then
return -1
endi
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"sync"
"sync/atomic"
"time"
"flag"
)
var (
token string
url string
config Config
request int64
period int64
errorNum int64
)
type Config struct {
HostIp string `json:"hostIp"`
ConnNum int `json:"connNum"`
InsertModel string `json:"insertModel"`
WaitTime int `json:"waitTime"`
TableDesc string `json:"tableDesc"`
TablePrefix string `json:"tablePrefix"`
TablePerConn int `json:"tablePerConn"`
TableCreate bool `json:"tableCreate"`
TableStart int `json:"tableStart"`
DbName string `json:"dbName"`
DbReplica int `json:"dbReplica"`
DbKeep int `json:"dbKeep"`
DbDays int `json:"dbDays"`
MetricsName string `json:"metricsName"`
TagNum int `json:"tagNum"`
DataNum int `json:"dataNum"`
DataBegin int64 `json:"dataBegin"`
DataInterval int `json:"dataInterval"`
DataBatch int `json:"dataBatch"`
DataInsert bool `json:"dataInsert"`
DataRandom bool `json:"dataRandom"`
}
type TokenResult struct {
Status string `json:"status"`
Code int `json:"code"`
Desc string `json:"desc"`
}
type JsonResult struct {
Status string `json:"status"`
Code int `json:"code"`
}
func readFile(filename string) {
file, err := os.Open(filename)
if err != nil {
println("taos_cloud.json not found")
panic(err)
}
defer file.Close()
dec := json.NewDecoder(file)
err = dec.Decode(&config)
if err != nil {
println("taos_cloud.json parse error")
panic(err)
}
if config.TagNum <= 0 {
config.TagNum = 1
}
request = 0
period = 0
errorNum = 0
fmt.Println("================config parameters======================")
fmt.Println("HostIp:", config.HostIp)
fmt.Println("connNum:", config.ConnNum)
fmt.Println("insertModel:", config.InsertModel)
fmt.Println("waitTime:", config.WaitTime)
fmt.Println("tableDesc:", config.TableDesc)
fmt.Println("tablePrefix:", config.TablePrefix)
fmt.Println("tablePerConn:", config.TablePerConn)
fmt.Println("tableCreate:", config.TableCreate)
fmt.Println("tableStart:", config.TableStart)
fmt.Println("dbName:", config.DbName)
fmt.Println("dbReplica:", config.DbReplica)
fmt.Println("dbKeep:", config.DbKeep)
fmt.Println("dbDays:", config.DbDays)
fmt.Println("metricsName:", config.MetricsName)
fmt.Println("tagNum:", config.TagNum)
fmt.Println("dataNum:", config.DataNum)
fmt.Println("dataBegin:", config.DataBegin)
fmt.Println("dataInterval:", config.DataInterval)
fmt.Println("dataBatch:", config.DataBatch)
fmt.Println("dataInsert:", config.DataInsert)
fmt.Println("dataRandom:", config.DataRandom)
fmt.Println("================http token=============================")
token, err = getToken()
url = fmt.Sprintf("http://%s:%d/rest/sql", config.HostIp, 6020)
fmt.Println("httpToken:", token)
fmt.Println("httpUrl:", url)
if err != nil {
panic(err)
}
}
func getToken() (string, error) {
resp, err := http.Get(fmt.Sprintf("http://%s:%d/rest/login/%s/%s", config.HostIp, 6020, "root", "taosdata"))
if err != nil {
return "", err
}
defer resp.Body.Close()
var tokenResult TokenResult
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
err = json.Unmarshal(data, &tokenResult)
if err != nil {
return "", err
}
if tokenResult.Status != "succ" {
fmt.Println("get http token failed")
fmt.Println(tokenResult)
return "", err
}
return tokenResult.Desc, nil
}
func exec(client *http.Client, sql string) {
for times := 0; times < 10; times++ {
req, err1 := http.NewRequest("POST", url, bytes.NewReader([]byte(sql)))
if err1 != nil {
continue
}
req.Header.Add("Authorization", "Taosd "+token)
begin := time.Now()
resp, err := client.Do(req)
if err != nil {
continue
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
resp.Body.Close()
continue
}
spend := (time.Since(begin).Nanoseconds())
var jsonResult JsonResult
err = json.Unmarshal(data, &jsonResult)
if err != nil {
resp.Body.Close()
continue
}
if jsonResult.Status != "succ" {
resp.Body.Close()
continue
}
atomic.AddInt64(&request, 1)
if (request < 103) {
return
}
atomic.AddInt64(&period, spend)
if request%5000 == 0 && request != 0 {
requestAvg := float64(period) / float64(1000000) / float64(request)
qps := float64(1000) / float64(requestAvg) * float64(config.ConnNum)
dps := qps * float64(config.DataBatch)
fmt.Println("====== req:", request, ", error:", errorNum, ", qps:", int64(qps), ", wait:", int64(requestAvg), "ms", ", data per second:", int64(dps))
}
return
}
fmt.Println("xxxx>sql:", sql, ", retryTimes:", 10)
atomic.AddInt64(&errorNum, 1)
}
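// insertTable imports one row per request for each of this connection's tables, stepping the timestamp by dataInterval every round.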
func insertTable(conn int) {
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
tmStart := config.DataBegin
for j := 0; j < config.DataNum; j++ {
for i := 0; i < config.TablePerConn; i++ {
tmVal := int64(j)*int64(config.DataInterval) + tmStart + 1
tbIndex := i + tbStart
dataVal := j
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql := fmt.Sprintf("import into %s.%s%d values(%d, %d)", config.DbName, config.TablePrefix, tbIndex, tmVal, dataVal)
exec(client, sql)
time.Sleep(time.Millisecond * time.Duration(10))
}
}
}
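// insertLoop batches dataBatch rows into each "insert into" statement for this connection's tables.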
func insertLoop(conn int) {
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
tmStart := config.DataBegin
for j := 0; j < config.DataNum; j++ {
for i := 0; i < config.TablePerConn; i++ {
tbIndex := i + tbStart
tmVal := int64(j)*int64(config.DataInterval) + tmStart
dataVal := j
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql := fmt.Sprintf("insert into %s.%s%d values(%d, %d)", config.DbName, config.TablePrefix, tbIndex, tmVal, dataVal)
for k := 1; k < config.DataBatch; k++ {
tmVal := int64(j)*int64(config.DataInterval) + int64(k) + tmStart
dataVal := j + k
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql += fmt.Sprintf("values(%d, %d)", tmVal, dataVal)
}
j += (config.DataBatch - 1)
exec(client, sql)
if config.WaitTime != 0 {
time.Sleep(time.Millisecond * time.Duration(config.WaitTime))
}
}
}
}
func insertTb(wg *sync.WaitGroup, conn int) {
defer wg.Done()
if !config.DataInsert {
return
}
if config.InsertModel == "insertTable" {
insertTable(conn)
} else {
insertLoop(conn)
}
}
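// selectData repeatedly runs a grouped max/min aggregation over 2000 sub-tables using a "tbname in (...)" filter.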
func selectData(wg *sync.WaitGroup, conn int) {
defer wg.Done()
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
for j := 0; j < config.DataNum; j++ {
tbIndex := 0 + tbStart
sql := fmt.Sprintf("select max(i),min(i) from db.mt where tbname in ('%s%d'", config.TablePrefix, tbIndex)
for i := 1; i < 2000; i++ {
tbIndex := i + tbStart
sql += fmt.Sprintf(",'%s%d'", config.TablePrefix, tbIndex)
}
sql += ") group by orgno"
//sql := fmt.Sprintf("select count(*) from db.mt")
//sql := fmt.Sprintf("select max(i),min(i) from db.mt", config.TablePrefix, tbIndex)
exec(client, sql)
time.Sleep(time.Millisecond * time.Duration(10))
}
}
func main() {
filename := flag.String("config", "taos_cloud.json", "config file name")
flag.Parse()
readFile(*filename)
fmt.Println("\n================http test start======================")
var wg sync.WaitGroup
fmt.Println("\n================select data ========================")
for i := 0; i < config.ConnNum; i++ {
wg.Add(1)
go insertTb(&wg, i)
}
for i := 0; i < config.ConnNum; i++ {
wg.Add(1)
go selectData(&wg, i)
}
wg.Wait()
fmt.Println("\n================http test stop ======================")
}
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database d1
sql use d1
sql create table table_rest (ts timestamp, i int)
print sql length is 270KB
restful d1 table_rest 1591072800 10000
restful d1 table_rest 1591172800 10000
restful d1 table_rest 1591272800 10000
restful d1 table_rest 1591372800 10000
restful d1 table_rest 1591472800 10000
restful d1 table_rest 1591572800 10000
restful d1 table_rest 1591672800 10000
restful d1 table_rest 1591772800 10000
restful d1 table_rest 1591872800 10000
restful d1 table_rest 1591972800 10000
sql select * from table_rest;
print rows: $rows
if $rows != 100000 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c http -v 1
#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10
system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database d1
sql use d1
sql create table table_gc (ts timestamp, db binary(10), tb binary(20), col binary(20))
sql create table m1 (ts timestamp, v1 int, v2 float)
sql create table m2 (ts timestamp, v1 int, v2 float)
sql insert into table_gc values('2017-12-25 21:28:41.022', 'd1', 'm1', 'v1')
sql insert into table_gc values('2017-12-25 21:28:42.022', 'd1', 'm1', 'v2')
sql insert into table_gc values('2017-12-25 21:28:43.022', 'd1', 'm2', 'v1')
sql insert into table_gc values('2017-12-25 21:28:44.022', 'd1', 'm2', 'v2')
sql insert into m1 values(1514208523020, 1, 4.1)
sql insert into m1 values(1514208523021, 2, 5.1)
sql insert into m1 values(1514208523022, 3, 6.1)
sql insert into m2 values(1514208523024, 3, 6.1)
sql insert into m2 values(1514208523025, 2, 5.1)
sql insert into m2 values(1514208523026, 1, 4.1)
sql create table mt (ts timestamp, i int) tags(a int, b binary(10))
sql create table t1 using mt tags (1, 'a')
sql create table t2 using mt tags (2, 'b')
sql create table t3 using mt tags (3, 'c')
sql insert into t1 values('2017-12-25 21:25:41', 1)
sql insert into t1 values('2017-12-25 21:26:41', 1)
sql insert into t1 values('2017-12-25 21:27:41', 1)
sql insert into t1 values('2017-12-25 21:28:41', 1)
sql insert into t1 values('2017-12-25 21:29:41', 1)
sql insert into t2 values('2017-12-25 21:25:41', 2)
sql insert into t2 values('2017-12-25 21:26:41', 2)
sql insert into t2 values('2017-12-25 21:27:41', 2)
sql insert into t2 values('2017-12-25 21:28:41', 2)
sql insert into t3 values('2017-12-25 21:25:41', 3)
sql insert into t3 values('2017-12-25 21:26:41', 3)
sql insert into t3 values('2017-12-25 21:27:41', 3)
print =============== step2 - login
system_content curl 127.0.0.1:7111/grafana/
print 1-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/grafana/xx
print 2-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/grafana/login/xx/xx/
print 3-> $system_content
if $system_content != @{"status":"error","code":849,"desc":"Invalid user"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/grafana/root/1/123/1/1/3
print 4-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/grafana/login/1/root/1/
print 5-> $system_content
if $system_content != @{"status":"error","code":849,"desc":"Invalid user"}@ then
return -1
endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login
print 6-> $system_content
if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/grafana/root/1/login
print 7-> $system_content
if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
sleep 2000
system_content curl 127.0.0.1:7111/grafana/login/root/taosdata
print 8-> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then
return -1
endi
print =============== step3 - heartbeat
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/d1/table_gc
print 9-> $system_content
if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then
return -1
endi
print =============== step4 - search
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/heartbeat
print 10-> $system_content
if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/grafana/d1/table_invalid/search
print 11-> $system_content
if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' 127.0.0.1:7111/grafana/d1/m1/search
print 12-> $system_content
if $system_content != @{"message":"Grafana server receive a quest from you!"}@ then
return -1
endi
print =============== step5 - query
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"system","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:7111/grafana/query
print 13-> $system_content
if $system_content != @[{"refId":"A","target":"taosd","datapoints":[[2,1514208480000]]},{"refId":"B","target":"system","datapoints":[[5.10000,1514208480000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select first(v1) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"},{"refId":"B","alias":"","sql":"select first(v2) from d1.m1 where ts > 1514208523020 and ts < 1514208523030 interval(1m)"}]' 127.0.0.1:7111/grafana/query
print 14-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,1514208480000]]},{"refId":"B","target":"B","datapoints":[[5.10000,1514208480000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query
print 15-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[[3,"-"]]},{"refId":"B","target":"B","datapoints":[[3,"-"]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query
print 15-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[[3,"-"]]},{"refId":"B","target":"B","datapoints":[[3,"-"]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:7111/grafana/query
print 16-> $system_content
if $system_content != @[{"refId":"A","target":"{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"{b:c}","datapoints":[[9,"-"]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt group by a"},{"refId":"B","alias":"sum-","sql":"select sum(i) from d1.mt group by b"}]' 127.0.0.1:7111/grafana/query
print 17-> $system_content
if $system_content != @[{"refId":"A","target":"count{a:1,}","datapoints":[[5,"-"]]},{"refId":"A","target":"count{a:2,}","datapoints":[[4,"-"]]},{"refId":"A","target":"count{a:3,}","datapoints":[[3,"-"]]},{"refId":"B","target":"sum-{b:a}","datapoints":[[5,"-"]]},{"refId":"B","target":"sum-{b:b}","datapoints":[[8,"-"]]},{"refId":"B","target":"sum-{b:c}","datapoints":[[9,"-"]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"count","sql":"select count(i) from d1.mt interval(1m) group by a "}]' 127.0.0.1:7111/grafana/query
print 18-> $system_content
if $system_content != @[{"refId":"A","target":"count{a:1,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000],[1,1514208540000]]},{"refId":"A","target":"count{a:2,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000],[1,1514208480000]]},{"refId":"A","target":"count{a:3,}","datapoints":[[1,1514208300000],[1,1514208360000],[1,1514208420000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"","sql":"select sum(v2), count(v1) from d1.m1"},{"refId":"B","alias":"","sql":"select count(v2), sum(v2) from d1.m1"}]' 127.0.0.1:7111/grafana/query
print 19-> $system_content
if $system_content != @[{"refId":"A","target":"{count(v1):3}","datapoints":[[15.299999714,"-"]]},{"refId":"B","target":"{sum(v2):15.299999714}","datapoints":[[3,"-"]]}]@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10
system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
system sh/exec.sh -n dnode1 -s start
sql connect
sleep 2000
print ============================ dnode1 start
print =============== step0 - prepare data
sql create database db
sql use db
sql create table tb (ts timestamp, val int, val1 int, val2 int)
sql create table tb2 (ts timestamp, val int, val1 int, val2 int)
sql create table t2 (ts timestamp, val int)
sql create table tb3 (ts timestamp, val int, val1 int, val2 int)
sql insert into tb values('2020-01-01 00:00:00.000', 1, 11, 21)
sql insert into tb values('2020-01-02 00:00:00.000', 1, 12, 22)
sql insert into tb values('2020-01-03 00:00:00.000', 2, 13, 23)
sql insert into tb values('2020-01-04 00:00:00.000', 2, 14, 24)
sql insert into tb2 values('2020-01-01 00:00:00.000', 21, 211, 221)
sql insert into tb2 values('2020-01-02 00:00:00.000', 21, 212, 222)
sql insert into tb2 values('2020-01-03 00:00:00.000', 22, 213, 223)
sql insert into tb2 values('2020-01-04 00:00:00.000', 22, 214, 224)
sql insert into tb3 values('2020-01-01 00:00:00.000', NULL, NULL, NULL)
sql insert into tb3 values('2020-01-02 00:00:00.000', NULL, NULL, NULL)
print =============== step1 - one query, 1 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then
return -1
endi
print =============== step2 - one query, 2 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step2-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
return -1
endi
print =============== step3 - one query, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step3.1-> $system_content
if $system_content != @[{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb "} ]' 127.0.0.1:7111/grafana/query
print step3.2-> $system_content
if $system_content != @[{"refId":"A","target":"{val1:11,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val1,val from db.tb "} ]' 127.0.0.1:7111/grafana/query
print step3.3-> $system_content
if $system_content != @[{"refId":"A","target":"{val:1,}","datapoints":[[11,1577808000000],[12,1577894400000]]},{"refId":"A","target":"{val:2,}","datapoints":[[13,1577980800000],[14,1578067200000]]}]@ then
return -1
endi
print =============== step4 - one query, 4 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val2,val1,val from db.tb "} ]' 127.0.0.1:7111/grafana/query
print step4.1-> $system_content
if $system_content != @[{"refId":"A","target":"{val1:11,, val:1,}","datapoints":[[21,1577808000000]]},{"refId":"A","target":"{val1:12,, val:1,}","datapoints":[[22,1577894400000]]},{"refId":"A","target":"{val1:13,, val:2,}","datapoints":[[23,1577980800000]]},{"refId":"A","target":"{val1:14,, val:2,}","datapoints":[[24,1578067200000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select ts,val,val1,val2 from db.tb "} ]' 127.0.0.1:7111/grafana/query
print step4.2-> $system_content
if $system_content != @[{"refId":"A","target":"{val1:11,, val2:21,}","datapoints":[[1,1577808000000]]},{"refId":"A","target":"{val1:12,, val2:22,}","datapoints":[[1,1577894400000]]},{"refId":"A","target":"{val1:13,, val2:23,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,, val2:24,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
print =============== step5 - one query, 1 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"A","datapoints":[[2,"-"],[2,"-"]]}]@ then
return -1
endi
print =============== step6 - one query, 2 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{val2:23,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,}","datapoints":[[14,"-"]]}]@ then
return -1
endi
print =============== step7 - one query, 3 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb"} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{val2:21,, val:1,}","datapoints":[[11,"-"]]},{"refId":"A","target":"{val2:22,, val:1,}","datapoints":[[12,"-"]]},{"refId":"A","target":"{val2:23,, val:2,}","datapoints":[[13,"-"]]},{"refId":"A","target":"{val2:24,, val:2,}","datapoints":[[14,"-"]]}]@ then
return -1
endi
print =============== step8 - one query, no return
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select val1,val2,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[]@ then
return -1
endi
print =============== step9 - one query, insert sql
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"insert into db.t2 values(now, 1) "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[]@ then
return -1
endi
print =============== step10 - one query, error sql
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select * from db.tt "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[]@ then
return -1
endi
print =============== step11 - two query, 1 column, with timestamp, 1 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then
return -1
endi
print =============== step12 - two query, 1 column, with timestamp, 2 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"A","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
return -1
endi
print =============== step13 - two query, 1 column, with timestamp, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[null,1577980800000],[null,1578067200000]]},{"refId":"A","target":"{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
print =============== step14 - two query, 2 column, with timestamp, 2 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[223,1577980800000],[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
return -1
endi
print =============== step15 - two query, 2 column, with timestamp, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB{val1:213,}","datapoints":[[223,1577980800000]]},{"refId":"B","target":"BB{val1:214,}","datapoints":[[224,1578067200000]]},{"refId":"A","target":"AA","datapoints":[[2,1577980800000],[2,1578067200000]]}]@ then
return -1
endi
print =============== step16 - two query, 3 column, with timestamp, 4 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val, val1, val2, val1 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val,val1 from db.tb where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB{val1:213,, val2:223,, val1:213,}","datapoints":[[22,1577980800000]]},{"refId":"B","target":"BB{val1:214,, val2:224,, val1:214,}","datapoints":[[22,1578067200000]]},{"refId":"A","target":"AA{val1:13,}","datapoints":[[2,1577980800000]]},{"refId":"A","target":"AA{val1:14,}","datapoints":[[2,1578067200000]]}]@ then
return -1
endi
print =============== step17 - two query, 2 column, with timestamp, no return
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then
return -1
endi
print =============== step18 - two query, 2 column, with timestamp, invalid sql
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val from db.tb222 where ts >= 1677980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then
return -1
endi
print =============== step19 - two query, 2 column, with timestamp, insert sql
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select ts, val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"insert into db.t2 values(now, 1)"} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,1577980800000],[22,1578067200000]]}]@ then
return -1
endi
print =============== step20 - two query, 1 column, no timestamp, 1 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[null,1577980800000],[null,1578067200000]]}]@ then
return -1
endi
print =============== step21 - two query, 1 column, no timestamp, 2 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA","datapoints":[[223,1577980800000],[224,1578067200000]]}]@ then
return -1
endi
print =============== step22 - two query, 1 column, no timestamp, 3 column, with timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select ts,val1, val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB","datapoints":[[22,"-"],[22,"-"]]},{"refId":"A","target":"AA{val2:223,}","datapoints":[[213,1577980800000]]},{"refId":"A","target":"AA{val2:224,}","datapoints":[[214,1578067200000]]}]@ then
return -1
endi
print =============== step23 - two query, 2 column, no timestamp, 1 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA","datapoints":[[213,"-"],[214,"-"]]}]@ then
return -1
endi
print =============== step24 - two query, 2 column, no timestamp, 2 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,}","datapoints":[[22,"-"]]}]@ then
return -1
endi
print =============== step25 - two query, 2 column, no timestamp, 3 column, no timestamp
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"B","alias":"BB","sql":"select val1,val2 from db.tb2 where ts >= 1577980800000 "},{"refId":"A","alias":"AA","sql":"select val,val1,val2 from db.tb2 where ts >= 1577980800000 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"B","target":"BB{val2:223,}","datapoints":[[213,"-"]]},{"refId":"B","target":"BB{val2:224,}","datapoints":[[214,"-"]]},{"refId":"A","target":"AA{val1:213,, val2:223,}","datapoints":[[22,"-"]]},{"refId":"A","target":"AA{val1:214,, val2:224,}","datapoints":[[22,"-"]]}]@ then
return -1
endi
print =============== step26 - 2 column, no timestamp, NULL
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select * from db.tb3 "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{val1:nil, val2:nil}","datapoints":[[null,1577808000000],[null,1577894400000]]}]@ then
return -1
endi
sql create table tt (ts timestamp ,i int) tags(j binary(20),k binary(20));
sql insert into t1 using tt tags('jnetworki','t1') values('2020-01-01 00:00:00.000',1)('2020-01-01 00:01:00.000',2)('2020-01-01 00:02:00.000',3)('2020-01-01 00:03:00.000',4)('2020-01-01 00:04:00.000',5);
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%network%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027jnetwo%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%networki\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database d1
sql use d1
sql create table table_rest (ts timestamp, i int)
print sql length is 270KB
restful d1 table_rest 1591072800 10000 gzip
restful d1 table_rest 1591172800 10000 gzip
restful d1 table_rest 1591272800 10000 gzip
restful d1 table_rest 1591372800 10000 gzip
restful d1 table_rest 1591472800 10000 gzip
restful d1 table_rest 1591572800 10000 gzip
restful d1 table_rest 1591672800 10000 gzip
restful d1 table_rest 1591772800 10000 gzip
restful d1 table_rest 1591872800 10000 gzip
restful d1 table_rest 1591972800 10000 gzip
sql select * from table_rest;
print rows: $rows
if $rows != 100000 then
return -1
endi
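# read the inserted rows back through the REST endpoint with a gzip-compressed response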
system curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql --compressed
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
{
"hostIp": "192.168.100.128",
"connNum": 1,
"insertModel": "insertTable",
"waitTime": 0,
"tableDesc": "ts timestamp i int",
"tablePrefix": "t",
"tablePerConn": 1,
"tableCreate": true,
"tableStart": 1,
"dbName": "db",
"dbReplica": 1,
"dbKeep": 3650,
"dbDays": 7,
"metricsName": "mt",
"tagNum": 10,
"dataNum": 100,
"dataBegin": 1485878400000,
"dataInterval": 1000,
"dataBatch": 1,
"dataInsert": true,
"dataRandom": false
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"sync"
"sync/atomic"
"time"
)
var (
token string
url string
config Config
request int64
begin time.Time
errorNum int64
)
type Config struct {
HostIp string `json:"hostIp"`
TableNum int `json:"tableNum"`
DbName string `json:"dbName"`
MetricsName string `json:"metricsName"`
DataNum int `json:"dataNum"`
BatchNum int `json:"batchNum"`
}
type TokenResult struct {
Status string `json:"status"`
Code int `json:"code"`
Desc string `json:"desc"`
}
type JsonResult struct {
Status string `json:"status"`
Code int `json:"code"`
}
func readFile(filename string) {
file, err := os.Open(filename)
if err != nil {
println("taos.json not found")
panic(err)
}
defer file.Close()
dec := json.NewDecoder(file)
err = dec.Decode(&config)
if err != nil {
println("taos.json parse error")
panic(err)
}
request = 0
errorNum = 0
fmt.Println("================config parameters======================")
fmt.Println("HostIp:", config.HostIp)
fmt.Println("TableNum:", config.TableNum)
fmt.Println("dbName:", config.DbName)
fmt.Println("metricsName:", config.MetricsName)
fmt.Println("dataNum:", config.DataNum)
fmt.Println("batchNum:", config.BatchNum)
fmt.Println("================http token=============================")
token, err = getToken()
url = fmt.Sprintf("http://%s:%d/rest/sql", config.HostIp, 6020)
fmt.Println("httpToken:", token)
fmt.Println("httpUrl:", url)
if err != nil {
panic(err)
}
}
func getToken() (string, error) {
resp, err := http.Get(fmt.Sprintf("http://%s:%d/rest/login/%s/%s", config.HostIp, 6020, "root", "taosdata"))
if err != nil {
return "", err
}
defer resp.Body.Close()
var tokenResult TokenResult
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
err = json.Unmarshal(data, &tokenResult)
if err != nil {
return "", err
}
if tokenResult.Status != "succ" {
fmt.Println("get http token failed")
fmt.Println(tokenResult)
return "", err
}
return tokenResult.Desc, nil
}
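// exec posts one SQL statement with up to 10 retries and prints insert throughput after every 100000 rows written.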
func exec(client *http.Client, sql string) {
for reTryTimes := 0; reTryTimes < 10; reTryTimes++ {
req, err1 := http.NewRequest("POST", url, bytes.NewReader([]byte(sql)))
if err1 != nil {
continue
}
req.Header.Add("Authorization", "Taosd "+token)
resp, err := client.Do(req)
if err != nil {
continue
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
resp.Body.Close()
continue
}
var jsonResult JsonResult
err = json.Unmarshal(data, &jsonResult)
if err != nil {
resp.Body.Close()
continue
}
if jsonResult.Status != "succ" {
resp.Body.Close()
continue
}
atomic.AddInt64(&request, 1)
if (request*int64(config.BatchNum))%100000 == 0 && request != 0 {
spend := time.Since(begin).Seconds()
if spend >= 1 && spend < 10000000 {
total := (request - errorNum - 2 - int64(config.TableNum)) * int64(config.BatchNum)
fmt.Printf("request:%d, error:%d, insert:%d, spend:%.2f seconds, dps:%.1f \n", request, errorNum, total, spend, float64(total)/float64(spend))
}
}
return
}
//fmt.Println("exec failed, sql:", sql)
atomic.AddInt64(&errorNum, 1)
}
func createDb() {
fmt.Println("================create database =====================")
client := &http.Client{}
sql := fmt.Sprintf("create database %s", config.DbName)
exec(client, sql)
}
func createTb() {
fmt.Println("================create table ========================")
client := &http.Client{}
sql := fmt.Sprintf("create table %s.%s(ts timestamp, f1 int, f2 int) tags (tb int)", config.DbName, config.MetricsName)
exec(client, sql)
for i := 0; i < config.TableNum; i++ {
sql := fmt.Sprintf("create table %s.t%d using %s.%s tags(%d)", config.DbName, i, config.DbName, config.MetricsName, i)
exec(client, sql)
}
}
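// insertData writes dataNum rows into one sub-table, batchNum rows per request, starting from a fixed base timestamp.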
func insertData(wg *sync.WaitGroup, tableIndex int) {
defer wg.Done()
client := &http.Client{}
beginTime := int64(1519833600000)
for i := 0; i < config.DataNum; i += config.BatchNum {
var sql bytes.Buffer
sql.WriteString(fmt.Sprintf("insert into %s.t%d values", config.DbName, tableIndex))
for j := 0; j < config.BatchNum; j++ {
sql.WriteString(fmt.Sprintf("(%d,%d,%d)", beginTime+int64(i)+int64(j), rand.Intn(1000), rand.Intn(1000)))
}
exec(client, sql.String())
}
}
func main() {
filename := flag.String("config", "http.json", "config file name")
flag.Parse()
readFile(*filename)
fmt.Println("\n================http test start======================")
createDb()
createTb()
begin = time.Now()
var wg sync.WaitGroup
fmt.Println("================insert data ========================")
for i := 0; i < config.TableNum; i++ {
wg.Add(1)
go insertData(&wg, i)
}
wg.Wait()
fmt.Println("\n================http test stop ======================")
spend := time.Since(begin).Seconds()
total := (request - errorNum - 2 - int64(config.TableNum)) * int64(config.BatchNum)
fmt.Printf("request:%d, error:%d, insert:%d, spend:%.2f seconds, dps:%.1f \n", request, errorNum, total, spend, float64(total)/float64(spend))
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"sync"
"sync/atomic"
"time"
"flag"
)
var (
token string
url string
config Config
request int64
period int64
errorNum int64
)
type Config struct {
HostIp string `json:"hostIp"`
ConnNum int `json:"connNum"`
InsertModel string `json:"insertModel"`
WaitTime int `json:"waitTime"`
TableDesc string `json:"tableDesc"`
TablePrefix string `json:"tablePrefix"`
TablePerConn int `json:"tablePerConn"`
TableCreate bool `json:"tableCreate"`
TableStart int `json:"tableStart"`
DbName string `json:"dbName"`
DbReplica int `json:"dbReplica"`
DbKeep int `json:"dbKeep"`
DbDays int `json:"dbDays"`
MetricsName string `json:"metricsName"`
TagNum int `json:"tagNum"`
DataNum int `json:"dataNum"`
DataBegin int64 `json:"dataBegin"`
DataInterval int `json:"dataInterval"`
DataBatch int `json:"dataBatch"`
DataInsert bool `json:"dataInsert"`
DataRandom bool `json:"dataRandom"`
}
type TokenResult struct {
Status string `json:"status"`
Code int `json:"code"`
Desc string `json:"desc"`
}
type JsonResult struct {
Status string `json:"status"`
Code int `json:"code"`
}
func readFile(filename string) {
file, err := os.Open(filename)
if err != nil {
println("taos_cloud.json not found")
panic(err)
}
defer file.Close()
dec := json.NewDecoder(file)
err = dec.Decode(&config)
if err != nil {
println("taos_cloud.json parse error")
panic(err)
}
if config.TagNum <= 0 {
config.TagNum = 1
}
request = 0
period = 0
errorNum = 0
fmt.Println("================config parameters======================")
fmt.Println("HostIp:", config.HostIp)
fmt.Println("connNum:", config.ConnNum)
fmt.Println("insertModel:", config.InsertModel)
fmt.Println("waitTime:", config.WaitTime)
fmt.Println("tableDesc:", config.TableDesc)
fmt.Println("tablePrefix:", config.TablePrefix)
fmt.Println("tablePerConn:", config.TablePerConn)
fmt.Println("tableCreate:", config.TableCreate)
fmt.Println("tableStart:", config.TableStart)
fmt.Println("dbName:", config.DbName)
fmt.Println("dbReplica:", config.DbReplica)
fmt.Println("dbKeep:", config.DbKeep)
fmt.Println("dbDays:", config.DbDays)
fmt.Println("metricsName:", config.MetricsName)
fmt.Println("tagNum:", config.TagNum)
fmt.Println("dataNum:", config.DataNum)
fmt.Println("dataBegin:", config.DataBegin)
fmt.Println("dataInterval:", config.DataInterval)
fmt.Println("dataBatch:", config.DataBatch)
fmt.Println("dataInsert:", config.DataInsert)
fmt.Println("dataRandom:", config.DataRandom)
fmt.Println("================http token=============================")
token, err = getToken()
url = fmt.Sprintf("http://%s:%d/rest/sql", config.HostIp, 6020)
fmt.Println("httpToken:", token)
fmt.Println("httpUrl:", url)
if err != nil {
panic(err)
}
}
func getToken() (string, error) {
resp, err := http.Get(fmt.Sprintf("http://%s:%d/rest/login/%s/%s", config.HostIp, 6020, "tiger", "tiger"))
if err != nil {
return "", err
}
defer resp.Body.Close()
var tokenResult TokenResult
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
err = json.Unmarshal(data, &tokenResult)
if err != nil {
return "", err
}
if tokenResult.Status != "succ" {
fmt.Println("get http token failed")
fmt.Println(tokenResult)
return "", err
}
return tokenResult.Desc, nil
}
func exec(client *http.Client, sql string) {
for times := 0; times < 10; times++ {
req, err1 := http.NewRequest("POST", url, bytes.NewReader([]byte(sql)))
if err1 != nil {
continue
}
req.Header.Add("Authorization", "Taosd "+token)
begin := time.Now()
resp, err := client.Do(req)
if err != nil {
continue
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
resp.Body.Close()
continue
}
spend := (time.Since(begin).Nanoseconds())
var jsonResult JsonResult
err = json.Unmarshal(data, &jsonResult)
if err != nil {
resp.Body.Close()
continue
}
if jsonResult.Status != "succ" {
resp.Body.Close()
continue
}
atomic.AddInt64(&request, 1)
if (request < 103) {
return
}
atomic.AddInt64(&period, spend)
if request%5000 == 0 && request != 0 {
requestAvg := float64(period) / float64(1000000) / float64(request)
qps := float64(1000) / float64(requestAvg) * float64(config.ConnNum)
dps := qps * float64(config.DataBatch)
fmt.Println("====== req:", request, ", error:", errorNum, ", qps:", int64(qps), ", wait:", int64(requestAvg), "ms", ", data per second:", int64(dps))
}
return
}
fmt.Println("xxxx>sql:", sql, ", retryTimes:", 10)
atomic.AddInt64(&errorNum, 1)
}
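// createDb and createTb only run when tableCreate is set; createTb creates the super table from tableDesc plus tablePerConn*connNum sub-tables tagged by orgno.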
func createDb() {
if !config.TableCreate {
return
}
client := &http.Client{}
fmt.Println("\n================create database =====================")
sql := fmt.Sprintf("create database %s keep %d", config.DbName, config.DbKeep)
exec(client, sql)
}
func createTb() {
if !config.TableCreate {
return
}
client := &http.Client{}
fmt.Println("\n================create table ========================")
sql := fmt.Sprintf("create table %s.%s(%s) tags (orgno int)", config.DbName, config.MetricsName, config.TableDesc)
exec(client, sql)
tbNum := config.TablePerConn*config.ConnNum + config.TableStart
for i := config.TableStart; i < tbNum; i++ {
sql := fmt.Sprintf("create table %s.%s%d using %s.%s tags(%d)", config.DbName, config.TablePrefix, i, config.DbName, config.MetricsName, i%config.TagNum+1)
exec(client, sql)
}
}
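// insertTable fills each of this connection's tables in turn, inserting dataNum rows in batches of dataBatch.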
func insertTable(conn int) {
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
tmStart := config.DataBegin
for i := 0; i < config.TablePerConn; i++ {
tbIndex := i + tbStart
for j := 0; j < config.DataNum; j++ {
tmVal := int64(j)*int64(config.DataInterval) + tmStart
dataVal := j
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql := fmt.Sprintf("insert into %s.%s%d values(%d, %d)", config.DbName, config.TablePrefix, tbIndex, tmVal, dataVal)
for k := 1; k < config.DataBatch; k++ {
tmVal := int64(j)*int64(config.DataInterval) + int64(k) + tmStart
dataVal := j + k
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql += fmt.Sprintf("(%d, %d)", tmVal, dataVal)
}
j += (config.DataBatch - 1)
exec(client, sql)
if config.WaitTime != 0 {
time.Sleep(time.Millisecond * time.Duration(config.WaitTime))
}
}
}
}
func insertLoop(conn int) {
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
tmStart := config.DataBegin
for j := 0; j < config.DataNum; j++ {
for i := 0; i < config.TablePerConn; i++ {
tbIndex := i + tbStart
tmVal := int64(j)*int64(config.DataInterval) + tmStart
dataVal := j
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql := fmt.Sprintf("insert into %s.%s%d values(%d, %d)", config.DbName, config.TablePrefix, tbIndex, tmVal, dataVal)
for k := 1; k < config.DataBatch; k++ {
tmVal := int64(j)*int64(config.DataInterval) + int64(k) + tmStart
dataVal := j + k
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql += fmt.Sprintf("values(%d, %d)", tmVal, dataVal)
}
j += (config.DataBatch - 1)
exec(client, sql)
if config.WaitTime != 0 {
time.Sleep(time.Millisecond * time.Duration(config.WaitTime))
}
}
}
}
func insertTb(wg *sync.WaitGroup, conn int) {
defer wg.Done()
if !config.DataInsert {
return
}
if config.InsertModel == "insertTable" {
insertTable(conn)
} else {
insertLoop(conn)
}
}
func selectData(wg *sync.WaitGroup, conn int) {
defer wg.Done()
client := &http.Client{}
for i := 0; i < config.DataNum; i++ {
exec(client, config.TableDesc)
}
}
func main() {
filename := flag.String("config", "taos_cloud.json", "config file name")
flag.Parse()
readFile(*filename)
fmt.Println("\n================http test start======================")
var wg sync.WaitGroup
if config.InsertModel == "selectData" {
fmt.Println("\n================select data ========================")
for i := 0; i < config.ConnNum; i++ {
wg.Add(1)
go selectData(&wg, i)
}
} else {
createDb()
createTb()
if config.DataInsert {
fmt.Println("\n================insert data ========================")
}
for i := 0; i < config.ConnNum; i++ {
wg.Add(1)
go insertTb(&wg, i)
}
}
wg.Wait()
fmt.Println("\n================http test stop ======================")
requestAvg := float64(period) / float64(1000000) / float64(request)
qps := float64(1000) / float64(requestAvg) * float64(config.ConnNum)
dps := qps * float64(config.DataBatch)
fmt.Println("====== req:", request, ", error:", errorNum, ", qps:", int64(qps), ", wait:", int64(requestAvg), "ms", ", data per second:", int64(dps))
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"sync"
"sync/atomic"
"time"
"flag"
)
var (
token string
url string
config Config
request int64
period int64
errorNum int64
)
type Config struct {
HostIp string `json:"hostIp"`
ConnNum int `json:"connNum"`
InsertModel string `json:"insertModel"`
WaitTime int `json:"waitTime"`
TableDesc string `json:"tableDesc"`
TablePrefix string `json:"tablePrefix"`
TablePerConn int `json:"tablePerConn"`
TableCreate bool `json:"tableCreate"`
TableStart int `json:"tableStart"`
DbName string `json:"dbName"`
DbReplica int `json:"dbReplica"`
DbKeep int `json:"dbKeep"`
DbDays int `json:"dbDays"`
MetricsName string `json:"metricsName"`
TagNum int `json:"tagNum"`
DataNum int `json:"dataNum"`
DataBegin int64 `json:"dataBegin"`
DataInterval int `json:"dataInterval"`
DataBatch int `json:"dataBatch"`
DataInsert bool `json:"dataInsert"`
DataRandom bool `json:"dataRandom"`
}
type TokenResult struct {
Status string `json:"status"`
Code int `json:"code"`
Desc string `json:"desc"`
}
type JsonResult struct {
Status string `json:"status"`
Code int `json:"code"`
}
func readFile(filename string) {
file, err := os.Open(filename)
if err != nil {
println("taos_cloud.json not found")
panic(err)
}
defer file.Close()
dec := json.NewDecoder(file)
err = dec.Decode(&config)
if err != nil {
println("taos_cloud.json parse error")
panic(err)
}
if config.TagNum <= 0 {
config.TagNum = 1
}
request = 0
period = 0
errorNum = 0
fmt.Println("================config parameters======================")
fmt.Println("HostIp:", config.HostIp)
fmt.Println("connNum:", config.ConnNum)
fmt.Println("insertModel:", config.InsertModel)
fmt.Println("waitTime:", config.WaitTime)
fmt.Println("tableDesc:", config.TableDesc)
fmt.Println("tablePrefix:", config.TablePrefix)
fmt.Println("tablePerConn:", config.TablePerConn)
fmt.Println("tableCreate:", config.TableCreate)
fmt.Println("tableStart:", config.TableStart)
fmt.Println("dbName:", config.DbName)
fmt.Println("dbReplica:", config.DbReplica)
fmt.Println("dbKeep:", config.DbKeep)
fmt.Println("dbDays:", config.DbDays)
fmt.Println("metricsName:", config.MetricsName)
fmt.Println("tagNum:", config.TagNum)
fmt.Println("dataNum:", config.DataNum)
fmt.Println("dataBegin:", config.DataBegin)
fmt.Println("dataInterval:", config.DataInterval)
fmt.Println("dataBatch:", config.DataBatch)
fmt.Println("dataInsert:", config.DataInsert)
fmt.Println("dataRandom:", config.DataRandom)
fmt.Println("================http token=============================")
token, err = getToken()
url = fmt.Sprintf("http://%s:%d/rest/sql", config.HostIp, 6020)
fmt.Println("httpToken:", token)
fmt.Println("httpUrl:", url)
if err != nil {
panic(err)
}
}
func getToken() (string, error) {
resp, err := http.Get(fmt.Sprintf("http://%s:%d/rest/login/%s/%s", config.HostIp, 6020, "root", "taosdata"))
if err != nil {
return "", err
}
defer resp.Body.Close()
var tokenResult TokenResult
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
err = json.Unmarshal(data, &tokenResult)
if err != nil {
return "", err
}
if tokenResult.Status != "succ" {
fmt.Println("get http token failed")
fmt.Println(tokenResult)
return "", err
}
return tokenResult.Desc, nil
}
func exec(client *http.Client, sql string) {
for times := 0; times < 1; times++ {
req, err1 := http.NewRequest("POST", url, bytes.NewReader([]byte(sql)))
if err1 != nil {
continue
}
req.Header.Add("Authorization", "Taosd "+token)
begin := time.Now()
resp, err := client.Do(req)
if err != nil {
continue
}
data, err := ioutil.ReadAll(resp.Body)
// close the body on every path so connections are not leaked on the success path
resp.Body.Close()
if err != nil {
continue
}
spend := time.Since(begin).Nanoseconds()
var jsonResult JsonResult
err = json.Unmarshal(data, &jsonResult)
if err != nil {
continue
}
if jsonResult.Status != "succ" {
continue
}
atomic.AddInt64(&request, 1)
if (request < 103) {
return
}
atomic.AddInt64(&period, spend)
if request%5000 == 0 && request != 0 {
requestAvg := float64(period) / float64(1000000) / float64(request)
qps := float64(1000) / float64(requestAvg) * float64(config.ConnNum)
dps := qps * float64(config.DataBatch)
fmt.Println("====== req:", request, ", error:", errorNum, ", qps:", int64(qps), ", wait:", int64(requestAvg), "ms", ", data per second:", int64(dps))
}
return
}
fmt.Println("xxxx>sql:", sql, ", retryTimes:", 10)
atomic.AddInt64(&errorNum, 1)
}
func createDb() {
if !config.TableCreate {
return
}
client := &http.Client{}
fmt.Println("\n================create database =====================")
sql := fmt.Sprintf("create database %s keep %d", config.DbName, config.DbKeep)
exec(client, sql)
}
func createTb() {
if !config.TableCreate {
return
}
client := &http.Client{}
fmt.Println("\n================create table ========================")
sql := fmt.Sprintf("create table %s.%s(%s) tags (orgno int)", config.DbName, config.MetricsName, config.TableDesc)
exec(client, sql)
tbNum := config.TablePerConn*config.ConnNum + config.TableStart
for i := config.TableStart; i < tbNum; i++ {
sql := fmt.Sprintf("create table %s.%s%d using %s.%s tags(%d)", config.DbName, config.TablePrefix, i, config.DbName, config.MetricsName, i%config.TagNum+1)
exec(client, sql)
}
}
func insertTable(conn int) {
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
tmStart := config.DataBegin
for j := 0; j < config.DataNum; j++ {
for i := 0; i < config.TablePerConn; i++ {
tbIndex := i + tbStart
tmVal := int64(j)*int64(config.DataInterval) + tmStart
dataVal := j
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql := fmt.Sprintf("insert into %s.%s%d values(%d, %d)", config.DbName, config.TablePrefix, tbIndex, tmVal, dataVal)
for k := 1; k < config.DataBatch; k++ {
tmVal := int64(j)*int64(config.DataInterval) + int64(k) + tmStart
dataVal := j + k
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql += fmt.Sprintf("(%d, %d)", tmVal, dataVal)
}
exec(client, sql)
if config.WaitTime != 0 {
time.Sleep(time.Millisecond * time.Duration(config.WaitTime))
}
}
j += (config.DataBatch - 1)
}
}
func insertLoop(conn int) {
client := &http.Client{}
tbStart := conn*config.TablePerConn + config.TableStart
tmStart := config.DataBegin
for j := 0; j < config.DataNum; j++ {
for i := 0; i < config.TablePerConn; i++ {
tbIndex := i + tbStart
tmVal := int64(j)*int64(config.DataInterval) + tmStart
dataVal := j
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql := fmt.Sprintf("insert into %s.%s%d values(%d, %d)", config.DbName, config.TablePrefix, tbIndex, tmVal, dataVal)
for k := 1; k < config.DataBatch; k++ {
tmVal := int64(j)*int64(config.DataInterval) + int64(k) + tmStart
dataVal := j + k
if config.DataRandom {
dataVal = rand.Intn(1000)
}
sql += fmt.Sprintf("values(%d, %d)", tmVal, dataVal)
}
j += (config.DataBatch - 1)
exec(client, sql)
if config.WaitTime != 0 {
time.Sleep(time.Millisecond * time.Duration(config.WaitTime))
}
}
}
}
func insertTb(wg *sync.WaitGroup, conn int) {
defer wg.Done()
if !config.DataInsert {
return
}
if config.InsertModel == "insertTable" {
insertTable(conn)
} else {
insertLoop(conn)
}
}
func selectData(wg *sync.WaitGroup, conn int) {
defer wg.Done()
client := &http.Client{}
for i := 0; i < config.DataNum; i++ {
exec(client, config.TableDesc)
}
}
func main() {
filename := flag.String("config", "taos_cloud.json", "config file name")
flag.Parse()
readFile(*filename)
fmt.Println("\n================http test start======================")
var wg sync.WaitGroup
if config.InsertModel == "selectData" {
fmt.Println("\n================select data ========================")
for i := 0; i < config.ConnNum; i++ {
wg.Add(1)
go selectData(&wg, i)
}
} else {
createDb()
createTb()
if config.DataInsert {
fmt.Println("\n================insert data ========================")
}
for i := 0; i < config.ConnNum; i++ {
wg.Add(1)
go insertTb(&wg, i)
}
}
wg.Wait()
fmt.Println("\n================http test stop ======================")
requestAvg := float64(period) / float64(1000000) / float64(request)
qps := float64(1000) / float64(requestAvg) * float64(config.ConnNum)
dps := qps * float64(config.DataBatch)
fmt.Println("====== req:", request, ", error:", errorNum, ", qps:", int64(qps), ", wait:", int64(requestAvg), "ms", ", data per second:", int64(dps))
}
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database d1
sql use d1
sql create table t1 (ts timestamp, i int, b binary(100))
sql insert into t1 values('2017-12-25 21:28:41.022', 1, 11)
sql insert into t1 values('2017-12-25 21:28:42.022', 2, '22')
sql insert into t1 values('2017-12-25 21:28:43.022', 3, "33")
sql insert into t1 values('2017-12-25 21:28:44.022', 4, '44"')
sql insert into t1 values('2017-12-25 21:28:45.022', 5, "55'")
sql insert into t1 values('2017-12-25 21:28:46.022', 6, "66'6'")
sql insert into t1 values('2017-12-25 21:28:47.022', 7, '77"7"')
sql insert into t1 values('2017-12-25 21:28:48.022', 8, '88""88')
sql insert into t1 values('2017-12-25 21:28:49.022', 9, '99\99')
sql insert into t1 values('2017-12-25 21:28:51.022', 11, '11\\11')
sql insert into t1 values('2017-12-25 21:28:52.022', 12, '22\\11')
sql insert into t1 values('2017-12-25 21:28:53.022', 13, '33\\"33')
sql insert into t1 values('2017-12-25 21:28:54.022', 14, '44\\""44')
sleep 4000
print =============== step2 - login
system_content curl 127.0.0.1:7111/rest/login/root/taosdata
print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then
return -1
endi
return
print =============== step3 - query data
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwia2V5IjoiYkVsOExjdkxVZDdhOHFkdE5abXRPTnJ5cDIwMW1hMDQiLCJzdWIiOiJyb290In0.k7CkgmpOJImIkLqZqzASlPmkdeEw7Wfk4XUrqGZX-LQ' -d 'select * from t1' 127.0.0.1:7111/rest/sql/d1
print curl 127.0.0.1:7111/rest/sql/d1 -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i","b"],"data":[["2017-12-25 21:28:54.022",14,"44\\\\\"\"44"],["2017-12-25 21:28:53.022",13,"33\\\\\"33"],["2017-12-25 21:28:52.022",12,"22\\\\11"],["2017-12-25 21:28:51.022",11,"11\\\\11"],["2017-12-25 21:28:49.022",9,"99\\99"],["2017-12-25 21:28:48.022",8,"88\"\"88"],["2017-12-25 21:28:47.022",7,"77\"7\""],["2017-12-25 21:28:46.022",6,"66'6'"],["2017-12-25 21:28:45.022",5,"55'"],["2017-12-25 21:28:44.022",4,"44\""],["2017-12-25 21:28:43.022",3,"33"],["2017-12-25 21:28:42.022",2,"22"],["2017-12-25 21:28:41.022",1,"11"]],"rows":13}@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c httpEnableRecordSql -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database d1
sql use d1
sql create table table_rest (ts timestamp, i int)
sql insert into table_rest values('2017-12-25 21:28:41.022', 1)
sql insert into table_rest values('2017-12-25 21:28:42.022', 2)
sql insert into table_rest values('2017-12-25 21:28:43.022', 3)
sql insert into table_rest values('2017-12-25 21:28:44.022', 4)
sql insert into table_rest values('2017-12-25 21:28:45.022', 5)
sql insert into table_rest values('2017-12-25 21:28:46.022', 6)
sql insert into table_rest values('2017-12-25 21:28:47.022', 7)
sql insert into table_rest values('2017-12-25 21:28:48.022', 8)
sql insert into table_rest values('2017-12-25 21:28:49.022', 9)
sql insert into table_rest values('2017-12-25 21:28:50.022', 10)
print =============== step2 - login
system_content curl 127.0.0.1:7111/rest/login/root/taosdata
print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content
if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then
return -1
endi
print =============== step3 - query data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"column_meta":[["ts",9,8],["i",4,4]],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
return -1
endi
print =============== step4 - insert data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_rest values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then
return -1
endi
print =============== step5 - query data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest' 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
if $system_content != @{"status":"succ","head":["ts","i"],"column_meta":[["ts",9,8],["i",4,4]],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
return -1
endi
print =============== step6 - query no db data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes' 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
print =============== step7 - change password
sql create user u1 PASS 'abcd@1234'
sql create user u2 PASS 'abcd_1234'
system_content curl 127.0.0.1:7111/rest/login/u1/abcd@1234
print curl 127.0.0.1:7111/rest/login/u1/abcd@1234 -----> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"jIlItaLFFIPa8qdtNZmtONryp201ma04SXX8PEJowKAB/46k1gwnPNryp201ma04"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/rest/login/u2/aabcd_1234
print curl 127.0.0.1:7111/rest/login/u2/abcd_1234 -----> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c httpDbNameMandatory -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
sql drop database if exists db
print ============================ dnode1 start
print =============== step1 - login
system_content curl 127.0.0.1:7111/rest/login/root/taosdata
print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then
return -1
endi
print =============== step2 - execute sql without db_name
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database if not exists db' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database if not exists db' 127.0.0.1:7111/rest/sql
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create table table_rest (ts timestamp, i int)' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create table table_rest (ts timestamp, i int)' 127.0.0.1:7111/rest/sql
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into table_rest (now, 1)' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into table_rest values (now, 1)' 127.0.0.1:7111/rest/sql
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from table_rest' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from table_rest' 127.0.0.1:7111/rest/sql
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'drop database if exists db' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'drop database if exists db' 127.0.0.1:7111/rest/sql
if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
return -1
endi
print =============== step3 - execute sql with db_name
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql/databases
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql/databases
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[],"rows":0}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database if not exists db' 127.0.0.1:7111/rest/sql/db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database if not exists db' 127.0.0.1:7111/rest/sql/db
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create table table_rest (ts timestamp, i int)' 127.0.0.1:7111/rest/sql/db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create table table_rest (ts timestamp, i int)' 127.0.0.1:7111/rest/sql/db
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into table_rest (now, 1)' 127.0.0.1:7111/rest/sql/db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into table_rest values (1629904789233, 1)' 127.0.0.1:7111/rest/sql/db
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from table_rest' 127.0.0.1:7111/rest/sql/db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from table_rest' 127.0.0.1:7111/rest/sql/db
if $system_content != @{"status":"succ","head":["ts","i"],"column_meta":[["ts",9,8],["i",4,4]],"data":[["2021-08-25 23:19:49.233",1]],"rows":1}@ then
return -1
endi
print curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'drop database if exists db' 127.0.0.1:7111/rest/sql/db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'drop database if exists db' 127.0.0.1:7111/rest/sql/db
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/exec.sh -n dnode1 -s start
#sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - login
system_content curl 127.0.0.1:7111/rest/
print 1-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
print $system_content
return -1
endi
system_content curl 127.0.0.1:7111/rest/xx
print 2-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/rest/login
print 3-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
#4
system_content curl 127.0.0.1:7111/rest/login/root
print 4-> $system_content
if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/rest/login/root/123
print 5-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
return -1
endi
system_content curl 127.0.0.1:7111/rest/login/root/123/1/1/3
print 6-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 7-> $system_content
if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
return -1
endi
#8
system_content curl -H 'Authorization: Beare eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 8-> $system_content
if $system_content != @{"status":"error","code":4386,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 8-> $system_content
if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:7111/rest/login/root/1
print 9-> $system_content
if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
return -1
endi
sleep 2000
system_content curl 127.0.0.1:7111/rest/login/root/taosdata/
print 10-> $system_content
if $system_content != @{"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"}@ then
return -1
endi
print =============== step2 - no db
#11
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql
print 11-> $system_content
if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep2","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep2",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[],"rows":0}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql
print 12-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:7111/rest/sql
print 13-> $system_content
if $system_content != @{"status":"error","code":897,"desc":"Database already exists"}@ then
return -1
endi
#14
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:7111/rest/sql
print 14-> $system_content
if $system_content != @{"status":"error","code":4359,"desc":"no sql input"}@ then
return -1
endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1' 127.0.0.1:7111/rest/sql
#print 15-> $system_content
#if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
#if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then
# return -1
#endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' use d1' 127.0.0.1:7111/rest/sql
#print 16-> $system_content
#if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
# return -1
#endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:7111/rest/sql
print 17-> $system_content
if $system_content != @{"status":"error","code":534,"desc":"Syntax error in SQL"}@ then
return -1
endi
#18
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql
print 18-> $system_content
if $system_content != @{"status":"error","code":896,"desc":"Database not specified or available"}@ then
return -1
endi
print =============== step3 - db
#19
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql/d4
#print 19-> $system_content
#if $system_content != @{"status":"error","code":1000,"desc":"invalid DB"}@ then
# return -1
#endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:7111/rest/sql/d1
#print 20-> $system_content
#if $system_content != @{"status":"succ","head":["name","created time","columns","metric"],"data":[],"rows":0}@ then
# return -1
#endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:7111/rest/sql
print 21-> $system_content
if $system_content != @{"status":"error","code":866,"desc":"Table does not exist"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d1.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql
print 22-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql
print 23-> $system_content
if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[],"rows":0}@ then
return -1
endi
#24
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql
print 24-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql
print 25-> $system_content
if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then
return -1
endi
#26
print 25-> no print
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:42.022', 2)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:43.022', 3)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:44.022', 4)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:45.022', 5)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:46.022', 6)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:47.022', 7)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:48.022', 8)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:49.022', 9)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:50.022', 10)" 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.t1 values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/rest/sql
#27
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1 ' 127.0.0.1:7111/rest/sql
print 27-> $system_content
if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d2' 127.0.0.1:7111/rest/sql
print 28-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' create table d2.t1 (ts timestamp, speed int)' 127.0.0.1:7111/rest/sql
print 29-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[0]],"rows":1}@ then
return -1
endi
#30
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d2.t1 values('2017-12-25 21:28:41.022', 1)" 127.0.0.1:7111/rest/sql
print 30-> $system_content
if $system_content != @{"status":"succ","head":["affected_rows"],"column_meta":[["affected_rows",4,4]],"data":[[1]],"rows":1}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d2.t1 ' 127.0.0.1:7111/rest/sql
print 31-> $system_content
if $system_content != @{"status":"succ","head":["ts","speed"],"column_meta":[["ts",9,8],["speed",4,4]],"data":[["2017-12-25 21:28:41.022",1]],"rows":1}@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c httpEnableRecordSql -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
sql create database d1
sql use d1
sql create table d1.table_rest0 (ts timestamp, i int)
sql create table d1.table_rest1 (ts timestamp, i int)
sql create table d1.table_rest2 (ts timestamp, i int)
sql create table d1.table_rest3 (ts timestamp, i int)
sql create table d1.table_rest4 (ts timestamp, i int)
sql create table d1.table_rest5 (ts timestamp, i int)
sql create table d1.table_rest6 (ts timestamp, i int)
sql create table d1.table_rest7 (ts timestamp, i int)
sql create table d1.table_rest8 (ts timestamp, i int)
sql create table d1.table_rest9 (ts timestamp, i int)
print =============== step2 - login
system_content curl 127.0.0.1:7111/rest/login/root/taosdata
print curl 127.0.0.1:7111/rest/login/root/taosdata -----> $system_content
if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then
return -1
endi
print =============== step3 - query data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'insert into d1.table_rest1 values('now+1s', 1) d1.table_rest2 values('now+1s', 1) d1.table_rest3 values('now+1s', 1) d1.table_rest4 values('now+1s', 1) ' 127.0.0.1:7111/rest/sql
print =============== step5 - query data
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_rest1' 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system sh/stop_dnodes.sh
sleep 2000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect
print ============================ dnode1 start
print =============== step1 - prepare data
$dbPrefix = db
$tbPrefix = tb
$mtPrefix = st
print =============== step1
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
sql create database $db
sql use $db
sql create table $mt (ts timestamp, tbcol bigint, t1 bigint, t2 bigint, t3 bigint, t4 bigint, t5 bigint, t6 bigint, t7 bigint, t8 bigint, t9 bigint, t0 nchar(20)) TAGS(tgcol bigint)
$i = 0
while $i < 2
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( 0 )
$x = 0
while $x < 2000
$ms = $x . m
sql insert into $tb values (now + $ms , 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, '你好' )
$x = $x + 1
endw
$i = $i + 1
endw
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from db0.st0 limit 100' 127.0.0.1:7111/rest/sql
print curl 127.0.0.1:7111/rest/sql -----> $system_content
#system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d "select * from db0.st0 where tbname in ('tb0', 'tb1') limit 1000" 127.0.0.1:7111/rest/sql
#print curl 127.0.0.1:7111/rest/sql -----> $system_content
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
{
"hostIp": "192.168.100.128",
"connNum": 1,
"insertModel": "insertTable",
"waitTime": 0,
"tableDesc": "ts timestamp, i int",
"tablePrefix": "t",
"tablePerConn": 1,
"tableCreate": true,
"tableStart": 1,
"dbName": "db",
"dbReplica": 1,
"dbKeep": 3650,
"dbDays": 7,
"metricsName": "mt",
"tagNum": 10,
"dataNum": 1000,
"dataBegin": 1485878400000,
"dataInterval": 1000,
"dataBatch": 1000,
"dataInsert": true,
"dataRandom": false
}
{
"hostIp": "192.168.100.128",
"connNum": 10,
"insertModel": "insertTable",
"waitTime": 0,
"tableDesc": "ts timestamp, i int",
"tablePrefix": "t",
"tablePerConn": 1,
"tableCreate": true,
"tableStart": 1,
"dbName": "db",
"dbKeep": 3650,
"metricsName": "mt",
"tagNum": 10,
"dataNum": 1000,
"dataBegin": 1517414400000,
"dataInterval": 1000,
"dataBatch": 1000,
"dataInsert": true,
"dataRandom": false
}
{
"hostIp": "192.168.0.1",
"machineNum": 100,
"loopNum": 100,
"dbName": "db",
"dataBegin": 1485878400000
}
{
"metrics": [{
"fields": {
"result_code": 2,
"result_type": "connection_failed"
},
"name": "net_response",
"tags": {
"host": "panshi-gsl",
"port": "80",
"protocol": "tcp",
"result": "connection_failed",
"server": "localhost"
},
"timestamp": 1536750390000
},{
"fields": {
"load1": 0.27,
"load15": 0.09,
"load5": 0.22,
"n_cpus": 4,
"n_users": 4
},
"name": "system",
"tags": {
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},{
"fields": {
"uptime": 122
},
"name": "system",
"tags": {
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"uptime_format": " 0:02"
},
"name": "system",
"tags": {
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"bytes_recv": 19964,
"bytes_sent": 11077,
"drop_in": 0,
"drop_out": 0,
"err_in": 0,
"err_out": 0,
"packets_recv": 237,
"packets_sent": 124
},
"name": "net",
"tags": {
"host": "panshi-gsl",
"interface": "eth0"
},
"timestamp": 1536750390000
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.39879759519036,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.300601202404812,
"usage_user": 0.30060120240480753
},
"name": "cpu",
"tags": {
"cpu": "cpu0",
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.79959919839683,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.2004008016032065,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu1",
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.7999999999999,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 0.10000000000000217,
"usage_user": 0.09999999999999995
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.7999999999999,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0.09999999999999988,
"usage_steal": 0,
"usage_system": 0.09999999999999995,
"usage_user": 0
},
"name": "cpu",
"tags": {
"cpu": "cpu3",
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 99.74956173303289,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0.025043826696719312,
"usage_steal": 0,
"usage_system": 0.12521913348359823,
"usage_user": 0.10017530678687725
},
"name": "cpu",
"tags": {
"cpu": "cpu-total",
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"icmp_inaddrmaskreps": 0,
"icmp_inaddrmasks": 0,
"icmp_indestunreachs": 1,
"icmp_inechoreps": 0,
"icmp_inechos": 0,
"icmp_inerrors": 0,
"icmp_inmsgs": 1,
"icmp_inparmprobs": 0,
"icmp_inredirects": 0,
"icmp_insrcquenchs": 0,
"icmp_intimeexcds": 0,
"icmp_intimestampreps": 0,
"icmp_intimestamps": 0,
"icmp_outaddrmaskreps": 0,
"icmp_outaddrmasks": 0,
"icmp_outdestunreachs": 5,
"icmp_outechoreps": 0,
"icmp_outechos": 0,
"icmp_outerrors": 0,
"icmp_outmsgs": 5,
"icmp_outparmprobs": 0,
"icmp_outredirects": 0,
"icmp_outsrcquenchs": 0,
"icmp_outtimeexcds": 0,
"icmp_outtimestampreps": 0,
"icmp_outtimestamps": 0,
"icmpmsg_intype3": 1,
"icmpmsg_outtype3": 5,
"ip_defaultttl": 64,
"ip_forwarding": 2,
"ip_forwdatagrams": 0,
"ip_fragcreates": 0,
"ip_fragfails": 0,
"ip_fragoks": 0,
"ip_inaddrerrors": 0,
"ip_indelivers": 132,
"ip_indiscards": 0,
"ip_inhdrerrors": 0,
"ip_inreceives": 132,
"ip_inunknownprotos": 0,
"ip_outdiscards": 0,
"ip_outnoroutes": 40,
"ip_outrequests": 134,
"ip_reasmfails": 0,
"ip_reasmoks": 0,
"ip_reasmreqds": 0,
"ip_reasmtimeout": 0,
"tcp_activeopens": 11,
"tcp_attemptfails": 11,
"tcp_currestab": 0,
"tcp_estabresets": 0,
"tcp_inerrs": 0,
"tcp_insegs": 22,
"tcp_maxconn": -1,
"tcp_outrsts": 11,
"tcp_outsegs": 22,
"tcp_passiveopens": 0,
"tcp_retranssegs": 0,
"tcp_rtoalgorithm": 1,
"tcp_rtomax": 120000,
"tcp_rtomin": 200,
"udp_indatagrams": 101,
"udp_inerrors": 0,
"udp_noports": 5,
"udp_outdatagrams": 109,
"udp_rcvbuferrors": 0,
"udp_sndbuferrors": 0,
"udplite_indatagrams": 0,
"udplite_inerrors": 0,
"udplite_noports": 0,
"udplite_outdatagrams": 0,
"udplite_rcvbuferrors": 0,
"udplite_sndbuferrors": 0
},
"name": "net",
"tags": {
"host": "panshi-gsl",
"interface": "all"
},
"timestamp": 1536750390000
},
{
"fields": {
"io_time": 44,
"iops_in_progress": 0,
"read_bytes": 569344,
"read_time": 44,
"reads": 108,
"weighted_io_time": 44,
"write_bytes": 0,
"write_time": 0,
"writes": 0
},
"name": "diskio",
"tags": {
"host": "panshi-gsl",
"name": "sda5"
},
"timestamp": 1536750390000
},
{
"fields": {
"io_time": 4900,
"iops_in_progress": 0,
"read_bytes": 262294528,
"read_time": 13607,
"reads": 8122,
"weighted_io_time": 27970,
"write_bytes": 12054528,
"write_time": 14369,
"writes": 1073
},
"name": "diskio",
"tags": {
"host": "panshi-gsl",
"name": "sda"
},
"timestamp": 1536750390000
},
{
"fields": {
"io_time": 4874,
"iops_in_progress": 0,
"read_bytes": 261522432,
"read_time": 13538,
"reads": 7963,
"weighted_io_time": 27901,
"write_bytes": 12054528,
"write_time": 14369,
"writes": 1073
},
"name": "diskio",
"tags": {
"host": "panshi-gsl",
"name": "sda1"
},
"timestamp": 1536750390000
},
{
"fields": {
"io_time": 15,
"iops_in_progress": 0,
"read_bytes": 2048,
"read_time": 15,
"reads": 2,
"weighted_io_time": 15,
"write_bytes": 0,
"write_time": 0,
"writes": 0
},
"name": "diskio",
"tags": {
"host": "panshi-gsl",
"name": "sda2"
},
"timestamp": 1536750390000
},
{
"fields": {
"free": 4106956800,
"inodes_free": 526746,
"inodes_total": 752192,
"inodes_used": 225446,
"total": 12112691200,
"used": 7390433280,
"used_percent": 64.27922535963918
},
"name": "disk",
"tags": {
"device": "rootfs",
"fstype": "rootfs",
"host": "panshi-gsl",
"mode": "rw",
"path": "/"
},
"timestamp": 1536750390000
},
{
"fields": {
"free": 4106956800,
"inodes_free": 526746,
"inodes_total": 752192,
"inodes_used": 225446,
"total": 12112691200,
"used": 7390433280,
"used_percent": 64.27922535963918
},
"name": "disk",
"tags": {
"device": "disk/by-uuid/d4becabf-b49c-4c1c-a4ea-b2f593018766",
"fstype": "ext3",
"host": "panshi-gsl",
"mode": "rw",
"path": "/"
},
"timestamp": 1536750390000
},
{
"fields": {
"boot_time": 1536750268,
"context_switches": 135890,
"entropy_avail": 514,
"interrupts": 112317,
"processes_forked": 3595
},
"name": "kernel",
"tags": {
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"active": 211845120,
"available": 3829399552,
"available_percent": 92.37877479393148,
"buffered": 12083200,
"cached": 254320640,
"commit_limit": 2648322048,
"committed_as": 549027840,
"dirty": 49152,
"free": 3562995712,
"high_free": 0,
"high_total": 0,
"huge_page_size": 2097152,
"huge_pages_free": 0,
"huge_pages_total": 0,
"inactive": 214351872,
"low_free": 0,
"low_total": 0,
"mapped": 81797120,
"page_tables": 13062144,
"shared": 2682880,
"slab": 33386496,
"swap_cached": 0,
"swap_free": 575660032,
"swap_total": 575660032,
"total": 4145324032,
"used": 315924480,
"used_percent": 7.621225206068523,
"vmalloc_chunk": 35184198369280,
"vmalloc_total": 35184372087808,
"vmalloc_used": 168906752,
"wired": 0,
"write_back": 0,
"write_back_tmp": 0
},
"name": "mem",
"tags": {
"host": "panshi-gsl"
},
"timestamp": 1536750390000
},
{
"fields": {
"IpExtInBcastOctets": 1291,
"IpExtInBcastPkts": 6,
"IpExtInMcastOctets": 0,
"IpExtInMcastPkts": 0,
"IpExtInNoRoutes": 0,
"IpExtInOctets": 12160,
"IpExtInTruncatedPkts": 0,
"IpExtOutBcastOctets": 0,
"IpExtOutBcastPkts": 0,
"IpExtOutMcastOctets": 0,
"IpExtOutMcastPkts": 0,
"IpExtOutOctets": 9881,
"TcpExtArpFilter": 0,
"TcpExtDelayedACKLocked": 0,
"TcpExtDelayedACKLost": 0,
"TcpExtDelayedACKs": 0,
"TcpExtEmbryonicRsts": 0,
"TcpExtListenDrops": 0,
"TcpExtListenOverflows": 0,
"TcpExtLockDroppedIcmps": 0,
"TcpExtOfoPruned": 0,
"TcpExtOutOfWindowIcmps": 0,
"TcpExtPAWSActive": 0,
"TcpExtPAWSEstab": 0,
"TcpExtPAWSPassive": 0,
"TcpExtPruneCalled": 0,
"TcpExtRcvPruned": 0,
"TcpExtSyncookiesFailed": 0,
"TcpExtSyncookiesRecv": 0,
"TcpExtSyncookiesSent": 0,
"TcpExtTCPAbortFailed": 0,
"TcpExtTCPAbortOnClose": 0,
"TcpExtTCPAbortOnData": 0,
"TcpExtTCPAbortOnLinger": 0,
"TcpExtTCPAbortOnMemory": 0,
"TcpExtTCPAbortOnSyn": 0,
"TcpExtTCPAbortOnTimeout": 0,
"TcpExtTCPDSACKIgnoredNoUndo": 0,
"TcpExtTCPDSACKIgnoredOld": 0,
"TcpExtTCPDSACKOfoRecv": 0,
"TcpExtTCPDSACKOfoSent": 0,
"TcpExtTCPDSACKOldSent": 0,
"TcpExtTCPDSACKRecv": 0,
"TcpExtTCPDSACKUndo": 0,
"TcpExtTCPDirectCopyFromBacklog": 0,
"TcpExtTCPDirectCopyFromPrequeue": 0,
"TcpExtTCPFACKReorder": 0,
"TcpExtTCPFastRetrans": 0,
"TcpExtTCPForwardRetrans": 0,
"TcpExtTCPFullUndo": 0,
"TcpExtTCPHPAcks": 0,
"TcpExtTCPHPHits": 0,
"TcpExtTCPHPHitsToUser": 0,
"TcpExtTCPLoss": 0,
"TcpExtTCPLossFailures": 0,
"TcpExtTCPLossUndo": 0,
"TcpExtTCPLostRetransmit": 0,
"TcpExtTCPMD5NotFound": 0,
"TcpExtTCPMD5Unexpected": 0,
"TcpExtTCPMemoryPressures": 0,
"TcpExtTCPPartialUndo": 0,
"TcpExtTCPPrequeueDropped": 0,
"TcpExtTCPPrequeued": 0,
"TcpExtTCPPureAcks": 0,
"TcpExtTCPRcvCollapsed": 0,
"TcpExtTCPRenoFailures": 0,
"TcpExtTCPRenoRecovery": 0,
"TcpExtTCPRenoRecoveryFail": 0,
"TcpExtTCPRenoReorder": 0,
"TcpExtTCPSACKDiscard": 0,
"TcpExtTCPSACKReneging": 0,
"TcpExtTCPSACKReorder": 0,
"TcpExtTCPSackFailures": 0,
"TcpExtTCPSackMerged": 0,
"TcpExtTCPSackRecovery": 0,
"TcpExtTCPSackRecoveryFail": 0,
"TcpExtTCPSackShiftFallback": 0,
"TcpExtTCPSackShifted": 0,
"TcpExtTCPSchedulerFailed": 0,
"TcpExtTCPSlowStartRetrans": 0,
"TcpExtTCPSpuriousRTOs": 0,
"TcpExtTCPTSReorder": 0,
"TcpExtTCPTimeouts": 0,
"TcpExtTW": 0,
"TcpExtTWKilled": 0,
"TcpExtTWRecycled": 0
},
"name": "nstat",
"tags": {
"host": "panshi-gsl",
"name": "netstat"
},
"timestamp": 1536750390000
},
{
"fields": {
"IcmpInAddrMaskReps": 0,
"IcmpInAddrMasks": 0,
"IcmpInDestUnreachs": 1,
"IcmpInEchoReps": 0,
"IcmpInEchos": 0,
"IcmpInErrors": 0,
"IcmpInMsgs": 1,
"IcmpInParmProbs": 0,
"IcmpInRedirects": 0,
"IcmpInSrcQuenchs": 0,
"IcmpInTimeExcds": 0,
"IcmpInTimestampReps": 0,
"IcmpInTimestamps": 0,
"IcmpMsgInType3": 1,
"IcmpMsgOutType3": 5,
"IcmpOutAddrMaskReps": 0,
"IcmpOutAddrMasks": 0,
"IcmpOutDestUnreachs": 5,
"IcmpOutEchoReps": 0,
"IcmpOutEchos": 0,
"IcmpOutErrors": 0,
"IcmpOutMsgs": 5,
"IcmpOutParmProbs": 0,
"IcmpOutRedirects": 0,
"IcmpOutSrcQuenchs": 0,
"IcmpOutTimeExcds": 0,
"IcmpOutTimestampReps": 0,
"IcmpOutTimestamps": 0,
"IpDefaultTTL": 64,
"IpForwDatagrams": 0,
"IpForwarding": 2,
"IpFragCreates": 0,
"IpFragFails": 0,
"IpFragOKs": 0,
"IpInAddrErrors": 0,
"IpInDelivers": 132,
"IpInDiscards": 0,
"IpInHdrErrors": 0,
"IpInReceives": 132,
"IpInUnknownProtos": 0,
"IpOutDiscards": 0,
"IpOutNoRoutes": 40,
"IpOutRequests": 134,
"IpReasmFails": 0,
"IpReasmOKs": 0,
"IpReasmReqds": 0,
"IpReasmTimeout": 0,
"TcpActiveOpens": 12,
"TcpAttemptFails": 12,
"TcpCurrEstab": 0,
"TcpEstabResets": 0,
"TcpInErrs": 0,
"TcpInSegs": 24,
"TcpMaxConn": -1,
"TcpOutRsts": 12,
"TcpOutSegs": 24,
"TcpPassiveOpens": 0,
"TcpRetransSegs": 0,
"TcpRtoAlgorithm": 1,
"TcpRtoMax": 120000,
"TcpRtoMin": 200,
"UdpInDatagrams": 101,
"UdpInErrors": 0,
"UdpLiteInDatagrams": 0,
"UdpLiteInErrors": 0,
"UdpLiteNoPorts": 0,
"UdpLiteOutDatagrams": 0,
"UdpLiteRcvbufErrors": 0,
"UdpLiteSndbufErrors": 0,
"UdpNoPorts": 5,
"UdpOutDatagrams": 109,
"UdpRcvbufErrors": 0,
"UdpSndbufErrors": 0
},
"name": "nstat",
"tags": {
"host": "panshi-gsl",
"name": "snmp"
},
"timestamp": 1536750390000
},
{
"fields": {
"blocked": 0,
"dead": 0,
"idle": 0,
"paging": 0,
"running": 1,
"sleeping": 181,
"stopped": 0,
"total": 182,
"total_threads": 280,
"unknown": 0,
"zombies": 0
},
"name": "processes",
"tags": {
"host": "panshi-gsl"
},
"timestamp": 1536750390000
}]
}
\ No newline at end of file
1. Build
Compile main.go into the executable telegrafTest.
2. Run
./telegrafTest
3. Parameters (the telegraf.json config file; a loading sketch follows this list)
hostIp       IP address of the TDengine host
machineNum   number of simulated machines
loopNum      number of requests each machine sends
dbName       name of the database to create
dataBegin    start timestamp of the generated data
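For reference, a minimal Go sketch of how telegrafTest might model and load this configuration. The JSON field names follow the sample telegraf.json earlier in this changeset; the type and function names here are illustrative only, not the tool's actual code.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// TelegrafConfig mirrors the fields of the sample telegraf.json shown above.
type TelegrafConfig struct {
	HostIp     string `json:"hostIp"`     // TDengine host IP
	MachineNum int    `json:"machineNum"` // number of simulated machines
	LoopNum    int    `json:"loopNum"`    // requests sent per machine
	DbName     string `json:"dbName"`     // database to create
	DataBegin  int64  `json:"dataBegin"`  // start timestamp of generated data
}

// loadTelegrafConfig decodes the JSON config file into TelegrafConfig.
func loadTelegrafConfig(path string) (TelegrafConfig, error) {
	var cfg TelegrafConfig
	f, err := os.Open(path)
	if err != nil {
		return cfg, err
	}
	defer f.Close()
	err = json.NewDecoder(f).Decode(&cfg)
	return cfg, err
}

func main() {
	cfg, err := loadTelegrafConfig("telegraf.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("hostIp:", cfg.HostIp, "machineNum:", cfg.MachineNum, "loopNum:", cfg.LoopNum)
}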
4. telegraf.req (usually left unchanged)
1) Replace the two strings panshi-gsl and 1536750380000 to build the HTTP request body (see the sketch below).
2) Send the request to TDengine over HTTP.
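A minimal sketch of this replace-and-send step, assuming the template sits in telegraf.req next to the binary and that the TDengine telegraf write endpoint is http://<hostIp>:6020/telegraf/<dbName> with basic authentication; the endpoint path, port, and credentials are assumptions for illustration, not confirmed by this changeset.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// sendTelegrafReq fills the telegraf.req template and posts it to TDengine.
// The endpoint path, port, and credentials below are illustrative assumptions.
func sendTelegrafReq(hostIp, dbName, machine string, ts int64) error {
	tmpl, err := ioutil.ReadFile("telegraf.req")
	if err != nil {
		return err
	}
	// substitute the host-name and base-timestamp placeholders
	body := strings.Replace(string(tmpl), "panshi-gsl", machine, -1)
	body = strings.Replace(body, "1536750380000", strconv.FormatInt(ts, 10), -1)

	url := fmt.Sprintf("http://%s:%d/telegraf/%s", hostIp, 6020, dbName)
	req, err := http.NewRequest("POST", url, strings.NewReader(body))
	if err != nil {
		return err
	}
	req.SetBasicAuth("root", "taosdata")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	ts := time.Now().UnixNano() / int64(time.Millisecond)
	if err := sendTelegrafReq("192.168.0.1", "db", "machine_0001", ts); err != nil {
		panic(err)
	}
}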
5. Notes
1) Adjust the configuration file /etc/taos/taos.cfg:
httpDebugFlag set to 131 or 135 to enable HTTP debug logging
httpMaxThreads set according to the machine, in the range 1 to the number of CPU cores
httpCacheSessions set to twice the expected number of HTTP connections
maxVnodeConnections set to 100000
maxMeterConnections set to 100000
maxShellConns set to 100000
maxMgmtConnections set to 100000
2) Raise the Linux limits, for example:
ulimit -n 600000
\ No newline at end of file
run general/http/autocreate.sim
run general/http/chunked.sim
run general/http/restful.sim
run general/http/restful_insert.sim
run general/http/restful_limit.sim
run general/http/restful_full.sim
run general/http/prepare.sim
run general/http/telegraf.sim
run general/http/grafana_bug.sim
run general/http/grafana.sim
......@@ -30,7 +30,18 @@ if $rows != 2 then
endi
print $data00 $data01 $data02
print $data00 $data11 $data12
print $data10 $data11 $data12
sql drop table st2
sql show stables
if $rows != 1 then
return -1
endi
print -->
print $data00 $data01 $data02
print $data10 $data11 $data12
return
......