Unverified commit a811391a authored by S Shengliang Guan, committed by GitHub

Merge pull request #8523 from taosdata/feature/dnode3

Feature/dnode3
...@@ -40,87 +40,112 @@ enum { ...@@ -40,87 +40,112 @@ enum {
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SUBMIT, "submit" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SUBMIT, "submit" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_QUERY, "query" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_QUERY, "query" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_FETCH, "fetch" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_FETCH, "fetch" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_TABLE, "create-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_TABLE, "drop-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_TABLE, "alter-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_UPDATE_TAG_VAL, "update-tag-val" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_UPDATE_TAG_VAL, "update-tag-val" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_CONNECT, "mq-connect" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_TABLE_META, "table-meta" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_DISCONNECT, "mq-disconnect" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_TABLES_META, "tables-meta" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_STABLE_VGROUP, "stable-vgroup" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_CONSUME, "mq-consume" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_CONSUME, "mq-consume" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_QUERY, "mq-query" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_QUERY, "mq-query" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_CONNECT, "mq-connect" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_DISCONNECT, "mq-disconnect" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_ACK, "mq-ack" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_ACK, "mq-ack" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_RESET, "mq-reset" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MQ_RESET, "mq-reset" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_NETWORK_TEST, "nettest" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY0, "dummy0" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY1, "dummy1" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY1, "dummy1" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY2, "dummy2" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY2, "dummy2" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY3, "dummy3" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY3, "dummy3" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY4, "dummy4" )
// message from mnode to dnode TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY5, "dummy5" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CREATE_TABLE, "create-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_DROP_TABLE, "drop-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_TABLE, "alter-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CREATE_VNODE, "create-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_DROP_VNODE, "drop-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_DROP_STABLE, "drop-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_STREAM, "alter-stream" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CONFIG_DNODE, "config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_VNODE, "alter-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_SYNC_VNODE, "sync-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CREATE_MNODE, "create-mnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_COMPACT_VNODE, "compact-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY6, "dummy6" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY6, "dummy6" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY7, "dummy7" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY7, "dummy7" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY8, "dummy8" )
// message from client to mnode
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CONNECT, "connect" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_ACCT, "create-acct" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_ACCT, "alter-acct" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_ACCT, "drop-acct" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_USER, "create-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_USER, "alter-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_USER, "drop-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DNODE, "create-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DNODE, "drop-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DB, "create-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_FUNCTION, "create-function" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_FUNCTION, "drop-function" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TABLE, "drop-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TABLE, "alter-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_TABLE_META, "table-meta" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_STABLE_VGROUP, "stable-vgroup" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_COMPACT_VNODE, "compact-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_TABLES_META, "multiTable-meta" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_STREAM, "alter-stream" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SHOW, "show" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_RETRIEVE, "retrieve" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_QUERY, "kill-query" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_STREAM, "kill-stream" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_CONN, "kill-conn" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CONFIG_DNODE, "cm-config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_HEARTBEAT, "heartbeat" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_RETRIEVE_FUNC, "retrieve-func" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY9, "dummy9" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY9, "dummy9" )
// message from mnode to dnode
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_STABLE_IN, "create-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_STABLE_IN, "alter-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_STABLE_IN, "drop-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_VNODE_IN, "create-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_VNODE_IN, "alter-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_VNODE_IN, "drop-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SYNC_VNODE_IN, "sync-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_COMPACT_VNODE_IN, "compact-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_MNODE_IN, "create-mnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_MNODE_IN, "drop-mnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CONFIG_DNODE_IN, "config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY10, "dummy10" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY10, "dummy10" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY11, "dummy11" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY11, "dummy11" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY12, "dummy12" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY13, "dummy13" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY14, "dummy14" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY15, "dummy15" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY16, "dummy16" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY17, "dummy17" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY18, "dummy18" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY19, "dummy19" )
// message from client to mnode
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CONNECT, "connect" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_ACCT, "create-acct" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_ACCT, "alter-acct" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_ACCT, "drop-acct" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_USER, "create-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_USER, "alter-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_USER, "drop-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_DNODE, "create-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CONFIG_DNODE, "config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_DNODE, "drop-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_DB, "create-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_DB, "drop-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_USE_DB, "use-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_DB, "alter-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SYNC_DB, "sync-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_TOPIC, "create-topic" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_TOPIC, "drop-topic" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_TOPIC, "alter-topic" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_FUNCTION, "create-function" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_FUNCTION, "alter-function" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_FUNCTION, "drop-function" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CREATE_STABLE, "create-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DROP_STABLE, "drop-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_ALTER_STABLE, "alter-stable" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_KILL_QUERY, "kill-query" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_KILL_CONN, "kill-conn" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_HEARTBEAT, "heartbeat" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SHOW, "show" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SHOW_RETRIEVE, "retrieve" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SHOW_RETRIEVE_FUNC, "retrieve-func" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_COMPACT_VNODE, "compact-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY20, "dummy20" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY21, "dummy21" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY22, "dummy22" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY23, "dummy23" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY24, "dummy24" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY25, "dummy25" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY26, "dummy26" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY27, "dummy27" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY28, "dummy28" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY29, "dummy29" )
// message from dnode to mnode // message from dnode to mnode
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_CONFIG_TABLE, "config-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_CONFIG_VNODE, "config-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_STATUS, "status" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_STATUS, "status" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_GRANT, "grant" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_GRANT, "grant" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_AUTH, "auth" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_AUTH, "auth" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY12, "dummy12" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY30, "dummy30" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY13, "dummy13" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY31, "dummy31" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY14, "dummy14" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY32, "dummy32" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_NETWORK_TEST, "nettest" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY33, "dummy33" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY34, "dummy34" )
// message for topic TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY35, "dummy35" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TP, "create-tp" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY36, "dummy36" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TP, "drop-tp" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY37, "dummy37" )
//TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_TP, "use-tp" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY38, "dummy38" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TP, "alter-tp" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY39, "dummy39" )
#ifndef TAOS_MESSAGE_C #ifndef TAOS_MESSAGE_C
TSDB_MSG_TYPE_MAX // 147 TSDB_MSG_TYPE_MAX // 147
......
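The #ifndef TAOS_MESSAGE_C guard above hints at the X-macro technique behind this list: TAOS_DEFINE_MESSAGE_TYPE is expanded once into request/response enum values and once into a parallel name-string table, so the two can never drift apart. A minimal, self-contained sketch of the technique follows (demo names only, not the actual taosmsg.h layout):

#include <stdio.h>

/* One list, expanded twice: once as enum values, once as name strings. */
#define MESSAGE_LIST(X)          \
  X(MSG_TYPE_SUBMIT, "submit")   \
  X(MSG_TYPE_QUERY,  "query")    \
  X(MSG_TYPE_FETCH,  "fetch")

#define AS_ENUM(name, str) name, name##_RSP,
#define AS_NAME(name, str) str, str "-rsp",

enum { MSG_TYPE_NULL, MESSAGE_LIST(AS_ENUM) MSG_TYPE_MAX };
static const char *msgName[] = {"null", MESSAGE_LIST(AS_NAME)};

int main(void) {
  printf("%d -> %s\n", MSG_TYPE_QUERY, msgName[MSG_TYPE_QUERY]);  /* prints: 3 -> query */
  return 0;
}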
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DNODE_H_
#define _TD_DNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
struct SRpcEpSet;
struct SRpcMsg;
/**
* Initialize and start the dnode module.
*
* @return Error code.
*/
int32_t dnodeInit();
/**
* Stop and cleanup dnode module.
*/
void dnodeCleanup();
/**
* Send messages to other dnodes, such as create vnode message.
*
* @param epSet, the endpoint list of the dnodes.
* @param rpcMsg, message to be sent.
*/
void dnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg);
/**
* Send messages to mnode, such as config message.
*
* @param rpcMsg, message to be sent.
*/
void dnodeSendMsgToMnode(struct SRpcMsg *rpcMsg);
/**
* Send redirect message to dnode or shell.
*
* @param rpcMsg, message to be sent.
* @param forShell, used to identify whether to send to shell or dnode.
*/
void dnodeSendRedirectMsg(struct SRpcMsg *rpcMsg, bool forShell);
/**
* Get the corresponding endpoint information from dnodeId.
*
 * @param dnodeId, the id of the dnode.
* @param ep, the endpoint of dnode.
* @param fqdn, the fqdn of dnode.
* @param port, the port of dnode.
*/
void dnodeGetEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
/**
* Report the startup progress.
*/
void dnodeReportStartup(char *name, char *desc);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DNODE_H_*/
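As a rough illustration of how an entry point might drive the dnode API declared above, a hedged sketch follows (the signal handling and sleep loop are assumptions for the example; the real loop lives in dnodeMain.c further down in this commit):

#include <signal.h>
#include <unistd.h>
#include "dnode.h"   /* the header documented above */

static volatile sig_atomic_t running = 1;
static void handleSigint(int sig) { (void)sig; running = 0; }

int main(void) {
  if (dnodeInit() != 0) return 1;   /* initialize and start every dnode subsystem */
  signal(SIGINT, handleSigint);
  while (running) sleep(1);         /* serve until interrupted */
  dnodeCleanup();                   /* stop and release all resources */
  return 0;
}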
...@@ -16,93 +16,111 @@ ...@@ -16,93 +16,111 @@
#ifndef _TD_VNODE_H_ #ifndef _TD_VNODE_H_
#define _TD_VNODE_H_ #define _TD_VNODE_H_
#include "os.h"
#include "taosmsg.h"
#include "trpc.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
typedef struct { typedef struct SVnode SVnode;
/**
* Send messages to other dnodes, such as create vnode message.
*
* @param epSet, the endpoint list of dnodes.
* @param rpcMsg, message to be sent.
*/
void (*SendMsgToDnode)(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg);
/**
* Send messages to mnode, such as config message.
*
* @param rpcMsg, message to be sent.
*/
void (*SendMsgToMnode)(struct SRpcMsg *rpcMsg);
/** typedef struct {
* Get the corresponding endpoint information from dnodeId. char dbName[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
* int32_t cacheBlockSize; // MB
* @param dnodeId, the id ot dnode. int32_t totalBlocks;
* @param ep, the endpoint of dnode. int32_t daysPerFile;
* @param fqdn, the fqdn of dnode. int32_t daysToKeep0;
* @param port, the port of dnode. int32_t daysToKeep1;
*/ int32_t daysToKeep2;
void (*GetDnodeEp)(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); int32_t minRowsPerFileBlock;
int32_t maxRowsPerFileBlock;
int8_t precision; // time resolution
int8_t compression;
int8_t cacheLastRow;
int8_t update;
int8_t quorum;
int8_t replica;
int8_t walLevel;
int32_t fsyncPeriod; // millisecond
SVnodeDesc replicas[TSDB_MAX_REPLICA];
} SVnodeCfg;
/** typedef struct {
* Report the startup progress. int64_t totalStorage;
*/ int64_t compStorage;
void (*ReportStartup)(char *name, char *desc); int64_t pointsWritten;
int64_t tablesNum;
} SVnodeStatisic;
} SVnodeFp; typedef struct {
int8_t syncRole;
} SVnodeStatus;
typedef struct { typedef struct {
SVnodeFp fp; int32_t accessState;
} SVnodePara; } SVnodeAccess;
typedef struct SVnodeMsg {
int32_t msgType;
int32_t code;
SRpcMsg rpcMsg; // original message from rpc
int32_t contLen;
char pCont[];
} SVnodeMsg;
/** /**
* Start initialize vnode module. * Start initialize vnode module.
* *
* @param para, initialization parameters.
* @return Error code. * @return Error code.
*/ */
int32_t vnodeInit(SVnodePara para); int32_t vnodeInit();
/** /**
* Cleanup vnode module. * Cleanup vnode module.
*/ */
void vnodeCleanup(); void vnodeCleanup();
typedef struct {
int32_t unused;
} SVnodeStat;
/** /**
* Get the statistical information of vnode. * Get the statistical information of vnode.
* *
* @param stat, statistical information. * @param pVnode,
* @param pStat, statistical information.
* @return Error Code. * @return Error Code.
*/ */
int32_t vnodeGetStatistics(SVnodeStat *stat); int32_t vnodeGetStatistics(SVnode *pVnode, SVnodeStatisic *pStat);
/** /**
* Get the status of all vnodes. * Get the status of all vnodes.
* *
* @param status, status msg. * @param pVnode,
* @param status, status information.
* @return Error Code.
*/ */
void vnodeGetStatus(struct SStatusMsg *status); int32_t vnodeGetStatus(SVnode *pVnode, SVnodeStatus *pStatus);
/** /**
* Set access permissions for all vnodes. * Operation functions of vnode
* *
* @param access, access permissions of vnodes. * @return Error Code.
* @param numOfVnodes, the size of vnodes.
*/ */
void vnodeSetAccess(struct SVgroupAccess *access, int32_t numOfVnodes); SVnode *vnodeOpen(int32_t vgId, const char *path);
void vnodeClose(SVnode *pVnode);
int32_t vnodeAlter(SVnode *pVnode, const SVnodeCfg *pCfg);
SVnode *vnodeCreate(int32_t vgId, const char *path, const SVnodeCfg *pCfg);
int32_t vnodeDrop(SVnode *pVnode);
int32_t vnodeCompact(SVnode *pVnode);
int32_t vnodeSync(SVnode *pVnode);
/** /**
* Interface for processing messages. * Interface for processing messages.
* *
* @param msg, message to be processed. * @param pVnode,
* @param pMsg, message to be processed.
*
*/ */
void vnodeProcessMsg(SRpcMsg *msg); int32_t vnodeProcessMsg(SVnode *pVnode, SVnodeMsg *pMsg);
#ifdef __cplusplus #ifdef __cplusplus
} }
......
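Since the vnode interface now operates on an explicit SVnode handle instead of module-wide globals, a hedged sketch of a full lifecycle may help (the vgId, path, and config values are made-up examples; error handling is abbreviated):

#include "vnode.h"

int32_t demoVnodeLifecycle(void) {
  if (vnodeInit() != 0) return -1;                 /* module-level init */

  SVnodeCfg cfg = {0};                             /* example values only */
  cfg.cacheBlockSize = 16;                         /* MB */
  cfg.totalBlocks = 4;
  cfg.daysPerFile = 10;
  cfg.replica = 1;

  SVnode *pVnode = vnodeCreate(2, "/var/lib/taos/vnode/vnode2", &cfg);
  if (pVnode == NULL) return -1;

  /* Each incoming request would be wrapped in an SVnodeMsg and dispatched:
   *   vnodeProcessMsg(pVnode, pMsg);                                      */

  SVnodeStatisic stat = {0};
  (void)vnodeGetStatistics(pVnode, &stat);         /* e.g. when building status messages */

  vnodeClose(pVnode);                              /* close the handle, keep data on disk */
  vnodeCleanup();                                  /* module-level cleanup */
  return 0;
}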
...@@ -37,21 +37,24 @@ shall be used to set up the protection. ...@@ -37,21 +37,24 @@ shall be used to set up the protection.
*/ */
typedef void* taos_queue; typedef void *taos_queue;
typedef void* taos_qset; typedef void *taos_qset;
typedef void* taos_qall; typedef void *taos_qall;
typedef void *(*FProcessItem)(void *pItem, void *ahandle);
typedef void *(*FProcessItems)(taos_qall qall, int numOfItems, void *ahandle);
taos_queue taosOpenQueue(); taos_queue taosOpenQueue();
void taosCloseQueue(taos_queue); void taosCloseQueue(taos_queue);
void taosSetQueueFp(taos_queue, FProcessItem, FProcessItems);
void *taosAllocateQitem(int size); void *taosAllocateQitem(int size);
void taosFreeQitem(void *item); void taosFreeQitem(void *pItem);
int taosWriteQitem(taos_queue, int type, void *item); int taosWriteQitem(taos_queue, void *pItem);
int taosReadQitem(taos_queue, int *type, void **pitem); int taosReadQitem(taos_queue, void **pItem);
taos_qall taosAllocateQall(); taos_qall taosAllocateQall();
void taosFreeQall(taos_qall); void taosFreeQall(taos_qall);
int taosReadAllQitems(taos_queue, taos_qall); int taosReadAllQitems(taos_queue, taos_qall);
int taosGetQitem(taos_qall, int *type, void **pitem); int taosGetQitem(taos_qall, void **pItem);
void taosResetQitems(taos_qall); void taosResetQitems(taos_qall);
taos_qset taosOpenQset(); taos_qset taosOpenQset();
...@@ -61,8 +64,8 @@ int taosAddIntoQset(taos_qset, taos_queue, void *ahandle); ...@@ -61,8 +64,8 @@ int taosAddIntoQset(taos_qset, taos_queue, void *ahandle);
void taosRemoveFromQset(taos_qset, taos_queue); void taosRemoveFromQset(taos_qset, taos_queue);
int taosGetQueueNumber(taos_qset); int taosGetQueueNumber(taos_qset);
int taosReadQitemFromQset(taos_qset, int *type, void **pitem, void **handle); int taosReadQitemFromQset(taos_qset, void **pItem, void **ahandle, FProcessItem *);
int taosReadAllQitemsFromQset(taos_qset, taos_qall, void **handle); int taosReadAllQitemsFromQset(taos_qset, taos_qall, void **ahandle, FProcessItems *);
int taosGetQueueItemsNumber(taos_queue param); int taosGetQueueItemsNumber(taos_queue param);
int taosGetQsetItemsNumber(taos_qset param); int taosGetQsetItemsNumber(taos_qset param);
......
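The queue API change is that the per-item type argument is gone and the processing callbacks are now attached to the queue itself via taosSetQueueFp. A hedged usage sketch (the item struct and the return-value assumption on taosReadQitem are illustrative):

#include <stdint.h>
#include <string.h>
#include "tqueue.h"   /* assumed header name for the declarations above */

typedef struct { int32_t msgType; char cont[64]; } SDemoItem;

static void *demoProcessItem(void *pItem, void *ahandle) {
  (void)ahandle;      /* ahandle is whatever was registered for this queue */
  return pItem;
}

int demoQueueUsage(void) {
  taos_queue q = taosOpenQueue();
  taosSetQueueFp(q, demoProcessItem, NULL);   /* item fp set, batch fp unused */

  SDemoItem *pItem = taosAllocateQitem(sizeof(SDemoItem));
  pItem->msgType = 1;
  strcpy(pItem->cont, "submit");
  taosWriteQitem(q, pItem);                   /* no queue-type argument any more */

  void *pRead = NULL;
  if (taosReadQitem(q, &pRead) > 0) {         /* assumed: >0 when an item was read */
    taosFreeQitem(pRead);
  }

  taosCloseQueue(q);
  return 0;
}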
...@@ -22,13 +22,6 @@ ...@@ -22,13 +22,6 @@
extern "C" { extern "C" {
#endif #endif
typedef int32_t (*ProcessStartFp)(void *ahandle, void *pMsg, int32_t qtype);
typedef void (*ProcessEndFp)(void *ahandle, void *pMsg, int32_t qtype, int32_t code);
typedef bool (*ProcessWriteStartFp)(void *ahandle, void *pMsg, int32_t qtype);
typedef void (*ProcessWriteSyncFp)(void *ahandle, int32_t code);
typedef void (*ProcessWriteEndFp)(void *ahandle, void *pMsg, int32_t qtype);
typedef struct SWorker { typedef struct SWorker {
int32_t id; // worker ID int32_t id; // worker ID
pthread_t thread; // thread pthread_t thread; // thread
...@@ -40,41 +33,36 @@ typedef struct SWorkerPool { ...@@ -40,41 +33,36 @@ typedef struct SWorkerPool {
int32_t min; // min number of workers int32_t min; // min number of workers
int32_t num; // current number of workers int32_t num; // current number of workers
taos_qset qset; taos_qset qset;
const char * name; const char *name;
ProcessStartFp startFp; SWorker *workers;
ProcessEndFp endFp;
SWorker * workers;
pthread_mutex_t mutex; pthread_mutex_t mutex;
} SWorkerPool; } SWorkerPool;
typedef struct SWriteWorker { typedef struct SMWorker {
int32_t id; // worker id int32_t id; // worker id
pthread_t thread; // thread pthread_t thread; // thread
taos_qall qall; taos_qall qall;
taos_qset qset; // queue set taos_qset qset; // queue set
struct SWriteWorkerPool *pool; struct SMWorkerPool *pool;
} SWriteWorker; } SMWorker;
typedef struct SWriteWorkerPool { typedef struct SMWorkerPool {
int32_t max; // max number of workers int32_t max; // max number of workers
int32_t nextId; // from 0 to max-1, cyclic int32_t nextId; // from 0 to max-1, cyclic
const char * name; const char *name;
ProcessWriteStartFp startFp; SMWorker *workers;
ProcessWriteSyncFp syncFp;
ProcessWriteEndFp endFp;
SWriteWorker * workers;
pthread_mutex_t mutex; pthread_mutex_t mutex;
} SWriteWorkerPool; } SMWorkerPool;
int32_t tWorkerInit(SWorkerPool *pool); int32_t tWorkerInit(SWorkerPool *pool);
void tWorkerCleanup(SWorkerPool *pool); void tWorkerCleanup(SWorkerPool *pool);
taos_queue tWorkerAllocQueue(SWorkerPool *pool, void *ahandle); taos_queue tWorkerAllocQueue(SWorkerPool *pool, void *ahandle, FProcessItem fp);
void tWorkerFreeQueue(SWorkerPool *pool, taos_queue queue); void tWorkerFreeQueue(SWorkerPool *pool, taos_queue queue);
int32_t tWriteWorkerInit(SWriteWorkerPool *pool); int32_t tMWorkerInit(SMWorkerPool *pool);
void tWriteWorkerCleanup(SWriteWorkerPool *pool); void tMWorkerCleanup(SMWorkerPool *pool);
taos_queue tWriteWorkerAllocQueue(SWriteWorkerPool *pool, void *ahandle); taos_queue tMWorkerAllocQueue(SMWorkerPool *pool, void *ahandle, FProcessItems fp);
void tWriteWorkerFreeQueue(SWriteWorkerPool *pool, taos_queue queue); void tMWorkerFreeQueue(SMWorkerPool *pool, taos_queue queue);
#ifdef __cplusplus #ifdef __cplusplus
} }
......
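With the startFp/endFp fields dropped from the pools, each module now passes its handler when it allocates a queue. A hedged sketch of the new wiring (the max field and the handler body are assumptions; only the fields visible in the diff are certain):

#include <stdint.h>
#include "tworker.h"   /* assumed header name for the declarations above */

static void *demoProcessRead(void *pItem, void *ahandle) {
  (void)ahandle;       /* called by a pool worker for every item read from this queue */
  return pItem;
}

static SWorkerPool tsReadPool;

int32_t demoInitReadWorkers(void *module) {
  tsReadPool.name = "demo-read";
  tsReadPool.min  = 2;
  tsReadPool.max  = 4;          /* assumed field, by analogy with SMWorkerPool */
  if (tWorkerInit(&tsReadPool) != 0) return -1;

  /* The callback now travels with the queue instead of living in the pool. */
  taos_queue readQ = tWorkerAllocQueue(&tsReadPool, module, demoProcessRead);
  if (readQ == NULL) return -1;

  /* ... taosWriteQitem(readQ, pMsg) from the rpc dispatch path ... */

  tWorkerFreeQueue(&tsReadPool, readQ);
  tWorkerCleanup(&tsReadPool);
  return 0;
}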
...@@ -3,4 +3,4 @@ add_subdirectory(util) ...@@ -3,4 +3,4 @@ add_subdirectory(util)
add_subdirectory(common) add_subdirectory(common)
add_subdirectory(libs) add_subdirectory(libs)
add_subdirectory(client) add_subdirectory(client)
add_subdirectory(server) add_subdirectory(dnode)
\ No newline at end of file \ No newline at end of file
add_subdirectory(mnode) add_subdirectory(mnode)
add_subdirectory(vnode) add_subdirectory(vnode)
add_subdirectory(qnode) add_subdirectory(qnode)
add_subdirectory(dnode) add_subdirectory(mgmt)
aux_source_directory(. TAOSD_SRC)
add_executable(taosd ${TAOSD_SRC})
target_link_libraries(
taosd
PUBLIC dnode
PUBLIC util
)
\ No newline at end of file
aux_source_directory(src DNODE_SRC) aux_source_directory(src DNODE_SRC)
add_library(dnode ${DNODE_SRC}) add_executable(taosd ${DNODE_SRC})
target_link_libraries( target_link_libraries(
dnode taosd
PUBLIC cjson PUBLIC cjson
PUBLIC mnode PUBLIC mnode
PUBLIC vnode PUBLIC vnode
...@@ -10,7 +10,7 @@ target_link_libraries( ...@@ -10,7 +10,7 @@ target_link_libraries(
PUBLIC taos PUBLIC taos
) )
target_include_directories( target_include_directories(
dnode taosd
PUBLIC "${CMAKE_SOURCE_DIR}/include/server/dnode" PUBLIC "${CMAKE_SOURCE_DIR}/include/server/dnode"
private "${CMAKE_CURRENT_SOURCE_DIR}/inc" private "${CMAKE_CURRENT_SOURCE_DIR}/inc"
) )
...@@ -13,8 +13,8 @@ ...@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef _TD_DNODE_EPS_H_ #ifndef _TD_DNODE_CONFIG_H_
#define _TD_DNODE_EPS_H_ #define _TD_DNODE_CONFIG_H_
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -39,4 +39,4 @@ void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell); ...@@ -39,4 +39,4 @@ void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell);
} }
#endif #endif
#endif /*_TD_DNODE_EPS_H_*/ #endif /*_TD_DNODE_CONFIG_H_*/
\ No newline at end of file \ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DNODE_DNODE_H_
#define _TD_DNODE_DNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dnodeInt.h"
int32_t dnodeInitMsg();
void dnodeCleanupMsg();
void dnodeProcessStatusRsp(SRpcMsg *pMsg);
void dnodeProcessStartupReq(SRpcMsg *pMsg);
void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DNODE_DNODE_H_*/
\ No newline at end of file
...@@ -25,7 +25,6 @@ extern "C" { ...@@ -25,7 +25,6 @@ extern "C" {
#include "tlog.h" #include "tlog.h"
#include "trpc.h" #include "trpc.h"
#include "ttimer.h" #include "ttimer.h"
#include "dnode.h"
extern int32_t dDebugFlag; extern int32_t dDebugFlag;
...@@ -38,6 +37,9 @@ extern int32_t dDebugFlag; ...@@ -38,6 +37,9 @@ extern int32_t dDebugFlag;
typedef enum { DN_RUN_STAT_INIT, DN_RUN_STAT_RUNNING, DN_RUN_STAT_STOPPED } EDnStat; typedef enum { DN_RUN_STAT_INIT, DN_RUN_STAT_RUNNING, DN_RUN_STAT_STOPPED } EDnStat;
int32_t dnodeInit();
void dnodeCleanup();
EDnStat dnodeGetRunStat(); EDnStat dnodeGetRunStat();
void dnodeSetRunStat(); void dnodeSetRunStat();
void dnodeGetStartup(SStartupStep *); void dnodeGetStartup(SStartupStep *);
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DNODE_MNODE_H_
#define _TD_DNODE_MNODE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dnodeInt.h"
void dnodeProcessCreateMnodeReq(SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DNODE_MNODE_H_*/
\ No newline at end of file
...@@ -13,8 +13,8 @@ ...@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef _TD_DNODE_TRANS_H_ #ifndef _TD_DNODE_TRANSPORT_H_
#define _TD_DNODE_TRANS_H_ #define _TD_DNODE_TRANSPORT_H_
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -30,4 +30,4 @@ void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); ...@@ -30,4 +30,4 @@ void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);
} }
#endif #endif
#endif /*_TD_DNODE_TRANS_H_*/ #endif /*_TD_DNODE_TRANSPORT_H_*/
...@@ -13,8 +13,8 @@ ...@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef _TD_DNODE_STATUS_H_ #ifndef _TD_DNODE_VNODES_H_
#define _TD_DNODE_STATUS_H_ #define _TD_DNODE_VNODES_H_
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -32,4 +32,4 @@ void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg); ...@@ -32,4 +32,4 @@ void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg);
} }
#endif #endif
#endif /*_TD_DNODE_STATUS_H_*/ #endif /*_TD_DNODE_VNODES_H_*/
\ No newline at end of file \ No newline at end of file
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "dnodeEps.h" #include "dnodeConfig.h"
#include "cJSON.h" #include "cJSON.h"
#include "thash.h" #include "thash.h"
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "dnodeDnode.h"
#include "dnodeConfig.h"
#include "mnode.h"
#include "tthread.h"
#include "ttime.h"
#include "vnode.h"
static struct {
pthread_t *threadId;
bool stop;
uint32_t rebootTime;
} tsMsg;
static void dnodeSendStatusMsg() {
int32_t contLen = sizeof(SStatusMsg) + TSDB_MAX_VNODES * sizeof(SVnodeLoad);
SStatusMsg *pStatus = rpcMallocCont(contLen);
if (pStatus == NULL) {
dError("failed to malloc status message");
return;
}
pStatus->version = htonl(tsVersion);
pStatus->dnodeId = htonl(dnodeGetDnodeId());
tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN);
pStatus->clusterId = htobe64(dnodeGetClusterId());
pStatus->lastReboot = htonl(tsMsg.rebootTime);
pStatus->numOfCores = htonl(tsNumOfCores);
pStatus->diskAvailable = tsAvailDataDirGB;
// fill cluster cfg parameters
pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval);
pStatus->clusterCfg.checkTime = 0;
tstrncpy(pStatus->clusterCfg.timezone, tsTimezone, 64);
char timestr[32] = "1970-01-01 00:00:00.00";
(void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN);
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
// vnodeGetStatus(NULL, pStatus);
// contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad);
// pStatus->openVnodes = htons(pStatus->openVnodes);
SRpcMsg rpcMsg = {.ahandle = NULL, .pCont = pStatus, .contLen = contLen, .msgType = TSDB_MSG_TYPE_DM_STATUS};
dnodeSendMsgToMnode(&rpcMsg);
}
void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
dTrace("status rsp is received, code:%s", tstrerror(pMsg->code));
if (pMsg->code != TSDB_CODE_SUCCESS) return;
SStatusRsp *pStatusRsp = pMsg->pCont;
SDnodeCfg *pCfg = &pStatusRsp->dnodeCfg;
pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->clusterId = htobe64(pCfg->clusterId);
pCfg->numOfVnodes = htonl(pCfg->numOfVnodes);
pCfg->numOfDnodes = htonl(pCfg->numOfDnodes);
dnodeUpdateCfg(pCfg);
if (pCfg->dropped) {
dError("status rsp is received, and set dnode to drop status");
return;
}
// vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes);
SDnodeEps *eps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess));
eps->dnodeNum = htonl(eps->dnodeNum);
for (int32_t i = 0; i < eps->dnodeNum; ++i) {
eps->dnodeEps[i].dnodeId = htonl(eps->dnodeEps[i].dnodeId);
eps->dnodeEps[i].dnodePort = htons(eps->dnodeEps[i].dnodePort);
}
dnodeUpdateDnodeEps(eps);
}
static void *dnodeThreadRoutine(void *param) {
int32_t ms = tsStatusInterval * 1000;
while (!tsMsg.stop) {
taosMsleep(ms);
dnodeSendStatusMsg();
}
return NULL;
}
int32_t dnodeInitMsg() {
tsMsg.stop = false;
tsMsg.rebootTime = taosGetTimestampSec();
tsMsg.threadId = taosCreateThread(dnodeThreadRoutine, NULL);
if (tsMsg.threadId == NULL) {
return -1;
}
dInfo("dnode msg is initialized");
return 0;
}
void dnodeCleanupMsg() {
if (tsMsg.threadId != NULL) {
tsMsg.stop = true;
taosDestoryThread(tsMsg.threadId);
tsMsg.threadId = NULL;
}
dInfo("dnode msg is cleanuped");
}
static int32_t dnodeStartMnode(SRpcMsg *pMsg) {
SCreateMnodeMsg *pCfg = pMsg->pCont;
pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->mnodeNum = htonl(pCfg->mnodeNum);
for (int32_t i = 0; i < pCfg->mnodeNum; ++i) {
pCfg->mnodeEps[i].dnodeId = htonl(pCfg->mnodeEps[i].dnodeId);
pCfg->mnodeEps[i].dnodePort = htons(pCfg->mnodeEps[i].dnodePort);
}
if (pCfg->dnodeId != dnodeGetDnodeId()) {
dDebug("dnode:%d, in create meps msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId());
return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED;
}
if (mnodeGetStatus() == MN_STATUS_READY) return 0;
return mnodeDeploy();
}
void dnodeProcessCreateMnodeReq(SRpcMsg *pMsg) {
int32_t code = dnodeStartMnode(pMsg);
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0, .code = code};
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg) {
SCfgDnodeMsg *pCfg = pMsg->pCont;
int32_t code = taosCfgDynamicOptions(pCfg->config);
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0, .code = code};
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
void dnodeProcessStartupReq(SRpcMsg *pMsg) {
dInfo("startup msg is received, cont:%s", (char *)pMsg->pCont);
SStartupStep *pStep = rpcMallocCont(sizeof(SStartupStep));
dnodeGetStartup(pStep);
dDebug("startup msg is sent, step:%s desc:%s finished:%d", pStep->name, pStep->desc, pStep->finished);
SRpcMsg rpcRsp = {.handle = pMsg->handle, .pCont = pStep, .contLen = sizeof(SStartupStep)};
rpcSendResponse(&rpcRsp);
rpcFreeCont(pMsg->pCont);
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "dnodeCheck.h"
#include "dnodeConfig.h"
#include "dnodeDnode.h"
#include "dnodeTransport.h"
#include "mnode.h"
#include "sync.h"
#include "tcache.h"
#include "tconfig.h"
#include "tnote.h"
#include "tstep.h"
#include "vnode.h"
#include "wal.h"
static struct {
EDnStat runStatus;
SStartupStep startup;
SSteps *steps;
} tsDnode;
EDnStat dnodeGetRunStat() { return tsDnode.runStatus; }
void dnodeSetRunStat(EDnStat stat) { tsDnode.runStatus = stat; }
void dnodeReportStartup(char *name, char *desc) {
SStartupStep *startup = &tsDnode.startup;
tstrncpy(startup->name, name, sizeof(startup->name));
tstrncpy(startup->desc, desc, sizeof(startup->desc));
startup->finished = 0;
}
static void dnodeReportStartupFinished(char *name, char *desc) {
SStartupStep *startup = &tsDnode.startup;
tstrncpy(startup->name, name, sizeof(startup->name));
tstrncpy(startup->desc, desc, sizeof(startup->desc));
startup->finished = 1;
}
void dnodeGetStartup(SStartupStep *pStep) { memcpy(pStep, &tsDnode.startup, sizeof(SStartupStep)); }
static int32_t dnodeInitVnode() {
return vnodeInit();
}
static int32_t dnodeInitMnode() {
SMnodePara para;
para.fp.GetDnodeEp = dnodeGetEp;
para.fp.SendMsgToDnode = dnodeSendMsgToDnode;
para.fp.SendMsgToMnode = dnodeSendMsgToMnode;
para.fp.SendRedirectMsg = dnodeSendRedirectMsg;
para.dnodeId = dnodeGetDnodeId();
para.clusterId = dnodeGetClusterId();
return mnodeInit(para);
}
static int32_t dnodeInitTfs() { return 0; }
static int32_t dnodeInitMain() {
tsDnode.runStatus = DN_RUN_STAT_STOPPED;
tscEmbedded = 1;
taosIgnSIGPIPE();
taosBlockSIGPIPE();
taosResolveCRC();
taosInitGlobalCfg();
taosReadGlobalLogCfg();
taosSetCoreDump(tsEnableCoreFile);
if (!taosMkDir(tsLogDir)) {
printf("failed to create dir: %s, reason: %s\n", tsLogDir, strerror(errno));
return -1;
}
char temp[TSDB_FILENAME_LEN];
sprintf(temp, "%s/taosdlog", tsLogDir);
if (taosInitLog(temp, tsNumOfLogLines, 1) < 0) {
printf("failed to init log file\n");
}
if (!taosReadGlobalCfg()) {
taosPrintGlobalCfg();
dError("TDengine read global config failed");
return -1;
}
dInfo("start to initialize TDengine");
taosInitNotes();
return taosCheckGlobalCfg();
}
static void dnodeCleanupMain() {
taos_cleanup();
taosCloseLog();
taosStopCacheRefreshWorker();
}
static int32_t dnodeCheckRunning(char *dir) {
char filepath[256] = {0};
snprintf(filepath, sizeof(filepath), "%s/.running", dir);
FileFd fd = taosOpenFileCreateWriteTrunc(filepath);
if (fd < 0) {
dError("failed to open lock file:%s since %s, quit", filepath, strerror(errno));
return -1;
}
int32_t ret = taosLockFile(fd);
if (ret != 0) {
dError("failed to lock file:%s since %s, quit", filepath, strerror(errno));
taosCloseFile(fd);
return -1;
}
return 0;
}
static int32_t dnodeInitDir() {
sprintf(tsMnodeDir, "%s/mnode", tsDataDir);
sprintf(tsVnodeDir, "%s/vnode", tsDataDir);
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
if (!taosMkDir(tsDnodeDir)) {
dError("failed to create dir:%s since %s", tsDnodeDir, strerror(errno));
return -1;
}
if (!taosMkDir(tsMnodeDir)) {
dError("failed to create dir:%s since %s", tsMnodeDir, strerror(errno));
return -1;
}
if (!taosMkDir(tsVnodeDir)) {
dError("failed to create dir:%s since %s", tsVnodeDir, strerror(errno));
return -1;
}
if (dnodeCheckRunning(tsDnodeDir) != 0) {
return -1;
}
return 0;
}
static void dnodeCleanupDir() {}
int32_t dnodeInit() {
SSteps *steps = taosStepInit(24, dnodeReportStartup);
if (steps == NULL) return -1;
taosStepAdd(steps, "dnode-main", dnodeInitMain, dnodeCleanupMain);
taosStepAdd(steps, "dnode-dir", dnodeInitDir, dnodeCleanupDir);
taosStepAdd(steps, "dnode-check", dnodeInitCheck, dnodeCleanupCheck);
taosStepAdd(steps, "dnode-rpc", rpcInit, rpcCleanup);
taosStepAdd(steps, "dnode-tfs", dnodeInitTfs, NULL);
taosStepAdd(steps, "dnode-wal", walInit, walCleanUp);
taosStepAdd(steps, "dnode-sync", syncInit, syncCleanUp);
taosStepAdd(steps, "dnode-eps", dnodeInitEps, dnodeCleanupEps);
taosStepAdd(steps, "dnode-vnode", dnodeInitVnode, vnodeCleanup);
taosStepAdd(steps, "dnode-mnode", dnodeInitMnode, mnodeCleanup);
taosStepAdd(steps, "dnode-trans", dnodeInitTrans, dnodeCleanupTrans);
taosStepAdd(steps, "dnode-msg", dnodeInitMsg, dnodeCleanupMsg);
tsDnode.steps = steps;
taosStepExec(tsDnode.steps);
dnodeSetRunStat(DN_RUN_STAT_RUNNING);
dnodeReportStartupFinished("TDengine", "initialized successfully");
dInfo("TDengine is initialized successfully");
return 0;
}
void dnodeCleanup() {
if (dnodeGetRunStat() != DN_RUN_STAT_STOPPED) {
dnodeSetRunStat(DN_RUN_STAT_STOPPED);
taosStepCleanup(tsDnode.steps);
tsDnode.steps = NULL;
}
}
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessMgmtMsg;
\ No newline at end of file
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
*/ */
#include "os.h" #include "os.h"
#include "ulog.h" #include "ulog.h"
#include "dnode.h" #include "dnodeInt.h"
static bool stop = false; static bool stop = false;
static void sigintHandler(int32_t signum, void *info, void *ctx) { stop = true; } static void sigintHandler(int32_t signum, void *info, void *ctx) { stop = true; }
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* This file is mainly responsible for the communication between dnodes. Each
 * dnode works as both server and client. A dnode may send status, grant, and config
 * messages to the mnode, and the mnode may send create/alter/drop table/vnode
 * messages to a dnode. All these messages are handled here.
 */
#define _DEFAULT_SOURCE
#include "dnodeTransport.h"
#include "dnodeConfig.h"
#include "dnodeDnode.h"
#include "dnodeMnode.h"
#include "dnodeVnodes.h"
#include "mnode.h"
#include "vnode.h"
typedef void (*MsgFp)(SRpcMsg *pMsg);
static struct {
void *serverRpc;
void *clientRpc;
void *shellRpc;
MsgFp msgFp[TSDB_MSG_TYPE_MAX];
} tsTrans;
static void dnodeInitMsgFp() {
// msg from client to dnode
tsTrans.msgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_TABLE] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_TABLE] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_TABLE] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_TABLE_META] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_TABLES_META] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_STABLE_VGROUP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_CONNECT] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_DISCONNECT] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_ACK] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_RESET] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeProcessStartupReq;
// msg from client to mnode
tsTrans.msgFp[TSDB_MSG_TYPE_CONNECT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_ACCT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_ACCT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_ACCT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_USER] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_USER] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_USER] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_DNODE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_DNODE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_USE_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SYNC_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_TOPIC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_TOPIC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_TOPIC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_FUNCTION] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_FUNCTION] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_FUNCTION] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_STABLE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_STABLE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_STABLE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_KILL_QUERY] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_KILL_CONN] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_HEARTBEAT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SHOW] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SHOW_RETRIEVE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SHOW_RETRIEVE_FUNC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE] = mnodeProcessMsg;
// message from mnode to dnode
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_STABLE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_STABLE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_STABLE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_STABLE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_STABLE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_STABLE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SYNC_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SYNC_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_MNODE_IN] = dnodeProcessCreateMnodeReq;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_MNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_MNODE_IN] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_MNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE_IN] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE_IN_RSP] = mnodeProcessMsg;
// message from dnode to mnode
tsTrans.msgFp[TSDB_MSG_TYPE_DM_AUTH] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_AUTH_RSP] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_GRANT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_GRANT_RSP] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_STATUS] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_STATUS_RSP] = dnodeProcessStatusRsp;
}
static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0};
int32_t msgType = pMsg->msgType;
if (msgType == TSDB_MSG_TYPE_NETWORK_TEST) {
dnodeProcessStartupReq(pMsg);
return;
}
if (dnodeGetRunStat() != DN_RUN_STAT_RUNNING) {
rspMsg.code = TSDB_CODE_APP_NOT_READY;
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
dTrace("RPC %p, peer req:%s is ignored since dnode not running", pMsg->handle, taosMsg[msgType]);
return;
}
if (pMsg->pCont == NULL) {
rspMsg.code = TSDB_CODE_DND_INVALID_MSG_LEN;
rpcSendResponse(&rspMsg);
return;
}
MsgFp fp = tsTrans.msgFp[msgType];
if (fp != NULL) {
dTrace("RPC %p, peer req:%s will be processed", pMsg->handle, taosMsg[msgType]);
(*fp)(pMsg);
} else {
dError("RPC %p, peer req:%s not processed", pMsg->handle, taosMsg[msgType]);
rspMsg.code = TSDB_CODE_DND_MSG_NOT_PROCESSED;
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
}
static int32_t dnodeInitServer() {
SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = tsDnodeDnodePort;
rpcInit.label = "DND-S";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessPeerReq;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
tsTrans.serverRpc = rpcOpen(&rpcInit);
if (tsTrans.serverRpc == NULL) {
dError("failed to init peer rpc server");
return -1;
}
dInfo("dnode peer rpc server is initialized");
return 0;
}
static void dnodeCleanupServer() {
if (tsTrans.serverRpc) {
rpcClose(tsTrans.serverRpc);
tsTrans.serverRpc = NULL;
dInfo("dnode peer server is closed");
}
}
static void dnodeProcessPeerRsp(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
int32_t msgType = pMsg->msgType;
if (dnodeGetRunStat() == DN_RUN_STAT_STOPPED) {
if (pMsg == NULL || pMsg->pCont == NULL) return;
dTrace("RPC %p, peer rsp:%s is ignored since dnode is stopping", pMsg->handle, taosMsg[msgType]);
rpcFreeCont(pMsg->pCont);
return;
}
if (msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pEpSet) {
dnodeUpdateMnodeEps(pEpSet);
}
MsgFp fp = tsTrans.msgFp[msgType];
if (fp != NULL) {
dTrace("RPC %p, peer rsp:%s will be processed", pMsg->handle, taosMsg[msgType]);
(*fp)(pMsg);
} else {
dDebug("RPC %p, peer rsp:%s not processed", pMsg->handle, taosMsg[msgType]);
}
rpcFreeCont(pMsg->pCont);
}
static int32_t dnodeInitClient() {
char secret[TSDB_KEY_LEN] = "secret";
SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.label = "DND-C";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessPeerRsp;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t";
rpcInit.ckey = "key";
rpcInit.secret = secret;
tsTrans.clientRpc = rpcOpen(&rpcInit);
if (tsTrans.clientRpc == NULL) {
dError("failed to init peer rpc client");
return -1;
}
dInfo("dnode peer rpc client is initialized");
return 0;
}
static void dnodeCleanupClient() {
if (tsTrans.clientRpc) {
rpcClose(tsTrans.clientRpc);
tsTrans.clientRpc = NULL;
dInfo("dnode peer rpc client is closed");
}
}
static void dnodeProcessShellReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0};
int32_t msgType = pMsg->msgType;
if (dnodeGetRunStat() == DN_RUN_STAT_STOPPED) {
dError("RPC %p, shell req:%s is ignored since dnode exiting", pMsg->handle, taosMsg[msgType]);
rspMsg.code = TSDB_CODE_DND_EXITING;
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
return;
} else if (dnodeGetRunStat() != DN_RUN_STAT_RUNNING) {
dError("RPC %p, shell req:%s is ignored since dnode not running", pMsg->handle, taosMsg[msgType]);
rspMsg.code = TSDB_CODE_APP_NOT_READY;
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
return;
}
if (pMsg->pCont == NULL) {
rspMsg.code = TSDB_CODE_DND_INVALID_MSG_LEN;
rpcSendResponse(&rspMsg);
return;
}
MsgFp fp = tsTrans.msgFp[msgType];
if (fp != NULL) {
dTrace("RPC %p, shell req:%s will be processed", pMsg->handle, taosMsg[msgType]);
(*fp)(pMsg);
} else {
dError("RPC %p, shell req:%s is not processed", pMsg->handle, taosMsg[msgType]);
rspMsg.code = TSDB_CODE_DND_MSG_NOT_PROCESSED;
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
}
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg) { rpcSendRequest(tsTrans.clientRpc, epSet, rpcMsg, NULL); }
void dnodeSendMsgToMnode(SRpcMsg *rpcMsg) {
SRpcEpSet epSet = {0};
dnodeGetEpSetForPeer(&epSet);
dnodeSendMsgToDnode(&epSet, rpcMsg);
}
static void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) {
SRpcEpSet epSet = {0};
dnodeGetEpSetForPeer(&epSet);
rpcSendRecv(tsTrans.clientRpc, &epSet, rpcMsg, rpcRsp);
}
static int32_t dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey) {
int32_t code = mnodeRetriveAuth(user, spi, encrypt, secret, ckey);
if (code != TSDB_CODE_APP_NOT_READY) return code;
SAuthMsg *pMsg = rpcMallocCont(sizeof(SAuthMsg));
tstrncpy(pMsg->user, user, sizeof(pMsg->user));
SRpcMsg rpcMsg = {0};
rpcMsg.pCont = pMsg;
rpcMsg.contLen = sizeof(SAuthMsg);
rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH;
dDebug("user:%s, send auth msg to mnodes", user);
SRpcMsg rpcRsp = {0};
dnodeSendMsgToMnodeRecv(&rpcMsg, &rpcRsp);
if (rpcRsp.code != 0) {
dError("user:%s, auth msg received from mnodes, error:%s", user, tstrerror(rpcRsp.code));
} else {
SAuthRsp *pRsp = rpcRsp.pCont;
dDebug("user:%s, auth msg received from mnodes", user);
memcpy(secret, pRsp->secret, TSDB_KEY_LEN);
memcpy(ckey, pRsp->ckey, TSDB_KEY_LEN);
*spi = pRsp->spi;
*encrypt = pRsp->encrypt;
}
rpcFreeCont(rpcRsp.pCont);
return rpcRsp.code;
}
static int32_t dnodeInitShell() {
int32_t numOfThreads = (int32_t)((tsNumOfCores * tsNumOfThreadsPerCore) / 2.0);
if (numOfThreads < 1) {
numOfThreads = 1;
}
SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = tsDnodeShellPort;
rpcInit.label = "SHELL";
rpcInit.numOfThreads = numOfThreads;
rpcInit.cfp = dnodeProcessShellReq;
rpcInit.sessions = tsMaxShellConns;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.afp = dnodeRetrieveUserAuthInfo;
tsTrans.shellRpc = rpcOpen(&rpcInit);
if (tsTrans.shellRpc == NULL) {
dError("failed to init shell rpc server");
return -1;
}
dInfo("dnode shell rpc server is initialized");
return 0;
}
static void dnodeCleanupShell() {
if (tsTrans.shellRpc) {
rpcClose(tsTrans.shellRpc);
tsTrans.shellRpc = NULL;
}
}
int32_t dnodeInitTrans() {
if (dnodeInitClient() != 0) {
return -1;
}
if (dnodeInitServer() != 0) {
return -1;
}
if (dnodeInitShell() != 0) {
return -1;
}
return 0;
}
void dnodeCleanupTrans() {
dnodeCleanupShell();
dnodeCleanupServer();
dnodeCleanupClient();
}
...@@ -81,7 +81,7 @@ static void mnodeDispatchToWriteQueue(SRpcMsg *pRpcMsg) { ...@@ -81,7 +81,7 @@ static void mnodeDispatchToWriteQueue(SRpcMsg *pRpcMsg) {
rpcSendResponse(&rpcRsp); rpcSendResponse(&rpcRsp);
} else { } else {
mTrace("msg:%p, app:%p type:%s is put into wqueue", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); mTrace("msg:%p, app:%p type:%s is put into wqueue", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]);
taosWriteQitem(tsMworker.writeQ, TAOS_QTYPE_RPC, pMsg); taosWriteQitem(tsMworker.writeQ, pMsg);
} }
} }
...@@ -93,7 +93,7 @@ void mnodeReDispatchToWriteQueue(SMnMsg *pMsg) { ...@@ -93,7 +93,7 @@ void mnodeReDispatchToWriteQueue(SMnMsg *pMsg) {
mnodeSendRedirectMsg(&pMsg->rpcMsg, true); mnodeSendRedirectMsg(&pMsg->rpcMsg, true);
mnodeCleanupMsg(pMsg); mnodeCleanupMsg(pMsg);
} else { } else {
taosWriteQitem(tsMworker.writeQ, TAOS_QTYPE_RPC, pMsg); taosWriteQitem(tsMworker.writeQ, pMsg);
} }
} }
...@@ -107,7 +107,7 @@ static void mnodeDispatchToReadQueue(SRpcMsg *pRpcMsg) { ...@@ -107,7 +107,7 @@ static void mnodeDispatchToReadQueue(SRpcMsg *pRpcMsg) {
rpcSendResponse(&rpcRsp); rpcSendResponse(&rpcRsp);
} else { } else {
mTrace("msg:%p, app:%p type:%s is put into rqueue", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]); mTrace("msg:%p, app:%p type:%s is put into rqueue", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType]);
taosWriteQitem(tsMworker.readQ, TAOS_QTYPE_RPC, pMsg); taosWriteQitem(tsMworker.readQ, pMsg);
} }
} }
...@@ -125,7 +125,7 @@ static void mnodeDispatchToPeerQueue(SRpcMsg *pRpcMsg) { ...@@ -125,7 +125,7 @@ static void mnodeDispatchToPeerQueue(SRpcMsg *pRpcMsg) {
} else { } else {
mTrace("msg:%p, app:%p type:%s is put into peer req queue", pMsg, pMsg->rpcMsg.ahandle, mTrace("msg:%p, app:%p type:%s is put into peer req queue", pMsg, pMsg->rpcMsg.ahandle,
taosMsg[pMsg->rpcMsg.msgType]); taosMsg[pMsg->rpcMsg.msgType]);
taosWriteQitem(tsMworker.peerReqQ, TAOS_QTYPE_RPC, pMsg); taosWriteQitem(tsMworker.peerReqQ, pMsg);
} }
} }
...@@ -140,13 +140,13 @@ void mnodeDispatchToPeerRspQueue(SRpcMsg *pRpcMsg) { ...@@ -140,13 +140,13 @@ void mnodeDispatchToPeerRspQueue(SRpcMsg *pRpcMsg) {
} else { } else {
mTrace("msg:%p, app:%p type:%s is put into peer rsp queue", pMsg, pMsg->rpcMsg.ahandle, mTrace("msg:%p, app:%p type:%s is put into peer rsp queue", pMsg, pMsg->rpcMsg.ahandle,
taosMsg[pMsg->rpcMsg.msgType]); taosMsg[pMsg->rpcMsg.msgType]);
taosWriteQitem(tsMworker.peerRspQ, TAOS_QTYPE_RPC, pMsg); taosWriteQitem(tsMworker.peerRspQ, pMsg);
} }
// rpcFreeCont(pRpcMsg->pCont); // rpcFreeCont(pRpcMsg->pCont);
} }
static void mnodeSendRpcRsp(void *ahandle, SMnMsg *pMsg, int32_t qtype, int32_t code) { void mnodeSendRsp(SMnMsg *pMsg, int32_t code) {
if (pMsg == NULL) return; if (pMsg == NULL) return;
if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return; if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return;
if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) { if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) {
...@@ -165,12 +165,6 @@ static void mnodeSendRpcRsp(void *ahandle, SMnMsg *pMsg, int32_t qtype, int32_t ...@@ -165,12 +165,6 @@ static void mnodeSendRpcRsp(void *ahandle, SMnMsg *pMsg, int32_t qtype, int32_t
mnodeCleanupMsg(pMsg); mnodeCleanupMsg(pMsg);
} }
void mnodeSendRsp(SMnMsg *pMsg, int32_t code) { mnodeSendRpcRsp(NULL, pMsg, 0, code); }
static void mnodeProcessPeerRspEnd(void *ahandle, SMnMsg *pMsg, int32_t qtype, int32_t code) {
mnodeCleanupMsg(pMsg);
}
static void mnodeInitMsgFp() { static void mnodeInitMsgFp() {
// // peer req // // peer req
// tsMworker.msgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = mnodeDispatchToPeerQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = mnodeDispatchToPeerQueue;
...@@ -207,96 +201,98 @@ static void mnodeInitMsgFp() { ...@@ -207,96 +201,98 @@ static void mnodeInitMsgFp() {
// tsMworker.peerRspFp[TSDB_MSG_TYPE_MD_DROP_VNODE_RSP] = mnodeProcessDropVnodeRsp; // tsMworker.peerRspFp[TSDB_MSG_TYPE_MD_DROP_VNODE_RSP] = mnodeProcessDropVnodeRsp;
// // read msg // // read msg
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_HEARTBEAT] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = mnodeProcessHeartBeatMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_HEARTBEAT] = mnodeProcessHeartBeatMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CONNECT] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CONNECT] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_CONNECT] = mnodeProcessConnectMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_CONNECT] = mnodeProcessConnectMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_USE_DB] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_USE_DB] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_USE_DB] = mnodeProcessUseMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_USE_DB] = mnodeProcessUseMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_TABLE_META] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_TABLE_META] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = mnodeProcessTableMetaMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_TABLE_META] = mnodeProcessTableMetaMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_TABLES_META] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_TABLES_META] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = mnodeProcessMultiTableMetaMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_TABLES_META] = mnodeProcessMultiTableMetaMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_STABLE_VGROUP] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP] = mnodeProcessSuperTableVgroupMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_STABLE_VGROUP] = mnodeProcessSuperTableVgroupMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_SHOW] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_SHOW] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_SHOW] = mnodeProcessShowMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_SHOW] = mnodeProcessShowMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_SHOW_RETRIEVE] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = mnodeProcessRetrieveMsg; // tsMworker.readMsgFp[TSDB_MSG_TYPE_SHOW_RETRIEVE] = mnodeProcessRetrieveMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = mnodeDispatchToReadQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_RETRIEVE_FUNC] = mnodeDispatchToReadQueue;
// tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = mnodeProcessRetrieveFuncReq; // tsMworker.readMsgFp[TSDB_MSG_TYPE_RETRIEVE_FUNC] = mnodeProcessRetrieveFuncReq;
// // tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = mnodeDispatchToWriteQueue; // // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_ACCT] = mnodeDispatchToWriteQueue;
// // tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = acctProcessCreateAcctMsg; // // tsMworker.readMsgFp[TSDB_MSG_TYPE_CREATE_ACCT] = acctProcessCreateAcctMsg;
// // tsMworker.msgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = mnodeDispatchToWriteQueue; // // tsMworker.msgFp[TSDB_MSG_TYPE_ALTER_ACCT] = mnodeDispatchToWriteQueue;
// // tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = acctProcessDropAcctMsg; // // tsMworker.readMsgFp[TSDB_MSG_TYPE_ALTER_ACCT] = acctProcessDropAcctMsg;
// // tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = mnodeDispatchToWriteQueue; // // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_ACCT] = mnodeDispatchToWriteQueue;
// // tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = acctProcessAlterAcctMsg; // // tsMworker.readMsgFp[TSDB_MSG_TYPE_DROP_ACCT] = acctProcessAlterAcctMsg;
// // write msg // // write msg
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_USER] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = mnodeProcessCreateUserMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_CREATE_USER] = mnodeProcessCreateUserMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_ALTER_USER] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = mnodeProcessAlterUserMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_ALTER_USER] = mnodeProcessAlterUserMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_USER] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_USER] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_DROP_USER] = mnodeProcessDropUserMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_DROP_USER] = mnodeProcessDropUserMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_DNODE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE] = mnodeProcessCreateDnodeMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_CREATE_DNODE] = mnodeProcessCreateDnodeMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_DNODE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = mnodeProcessDropDnodeMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_DROP_DNODE] = mnodeProcessDropDnodeMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE] = mnodeProcessCfgDnodeMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_CONFIG_DNODE] = mnodeProcessCfgDnodeMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_DB] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = mnodeProcessCreateDbMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_CREATE_DB] = mnodeProcessCreateDbMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_ALTER_DB] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = mnodeProcessAlterDbMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_ALTER_DB] = mnodeProcessAlterDbMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_DB] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_DB] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = mnodeProcessDropDbMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_DROP_DB] = mnodeProcessDropDbMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_SYNC_DB] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = mnodeProcessSyncDbMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_SYNC_DB] = mnodeProcessSyncDbMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_COMPACT_VNODE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_COMPACT_VNODE] = mnodeProcessCompactMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_COMPACT_VNODE] = mnodeProcessCompactMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_FUNCTION] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = mnodeProcessCreateFuncMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_CREATE_FUNCTION] = mnodeProcessCreateFuncMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_FUNCTION] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = mnodeProcessDropFuncMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_DROP_FUNCTION] = mnodeProcessDropFuncMsg;
// // tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = mnodeDispatchToWriteQueue; // // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_TP] = mnodeDispatchToWriteQueue;
// // tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = tpProcessCreateTpMsg; // // tsMworker.readMsgFp[TSDB_MSG_TYPE_CREATE_TP] = tpProcessCreateTpMsg;
// // tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_TP] = mnodeDispatchToWriteQueue; // // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_TP] = mnodeDispatchToWriteQueue;
// // tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = tpProcessAlterTpMsg; // // tsMworker.readMsgFp[TSDB_MSG_TYPE_DROP_TP] = tpProcessAlterTpMsg;
// // tsMworker.msgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = mnodeDispatchToWriteQueue; // // tsMworker.msgFp[TSDB_MSG_TYPE_ALTER_TP] = mnodeDispatchToWriteQueue;
// // tsMworker.readMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = tpProcessDropTpMsg; // // tsMworker.readMsgFp[TSDB_MSG_TYPE_ALTER_TP] = tpProcessDropTpMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_CREATE_TABLE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE] = mnodeProcessCreateTableMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_CREATE_TABLE] = mnodeProcessCreateTableMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_DROP_TABLE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_DROP_TABLE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_DROP_TABLE] = mnodeProcessDropTableMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_DROP_TABLE] = mnodeProcessDropTableMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_ALTER_TABLE] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_ALTER_TABLE] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_ALTER_TABLE] = mnodeProcessAlterTableMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_ALTER_TABLE] = mnodeProcessAlterTableMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_ALTER_STREAM] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_ALTER_STREAM] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_ALTER_STREAM] = NULL; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_ALTER_STREAM] = NULL;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_KILL_QUERY] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_KILL_QUERY] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_KILL_QUERY] = mnodeProcessKillQueryMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_KILL_QUERY] = mnodeProcessKillQueryMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_KILL_STREAM] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = mnodeProcessKillStreamMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_KILL_STREAM] = mnodeProcessKillStreamMsg;
// tsMworker.msgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = mnodeDispatchToWriteQueue; // tsMworker.msgFp[TSDB_MSG_TYPE_KILL_CONN] = mnodeDispatchToWriteQueue;
// tsMworker.writeMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = mnodeProcessKillConnectionMsg; // tsMworker.writeMsgFp[TSDB_MSG_TYPE_KILL_CONN] = mnodeProcessKillConnectionMsg;
} }
static int32_t mnodeProcessWriteReq(void *unused, SMnMsg *pMsg, int32_t qtype) { static void mnodeProcessWriteReq(SMnMsg *pMsg, void *unused) {
int32_t msgType = pMsg->rpcMsg.msgType; int32_t msgType = pMsg->rpcMsg.msgType;
void *ahandle = pMsg->rpcMsg.ahandle; void *ahandle = pMsg->rpcMsg.ahandle;
int32_t code = 0;
if (pMsg->rpcMsg.pCont == NULL) { if (pMsg->rpcMsg.pCont == NULL) {
mError("msg:%p, app:%p type:%s content is null", pMsg, ahandle, taosMsg[msgType]); mError("msg:%p, app:%p type:%s content is null", pMsg, ahandle, taosMsg[msgType]);
return TSDB_CODE_MND_INVALID_MSG_LEN; code = TSDB_CODE_MND_INVALID_MSG_LEN;
goto PROCESS_WRITE_REQ_END;
} }
if (!mnodeIsMaster()) { if (!mnodeIsMaster()) {
...@@ -309,31 +305,39 @@ static int32_t mnodeProcessWriteReq(void *unused, SMnMsg *pMsg, int32_t qtype) { ...@@ -309,31 +305,39 @@ static int32_t mnodeProcessWriteReq(void *unused, SMnMsg *pMsg, int32_t qtype) {
mDebug("msg:%p, app:%p type:%s in write queue, is redirected, numOfEps:%d inUse:%d", pMsg, ahandle, mDebug("msg:%p, app:%p type:%s in write queue, is redirected, numOfEps:%d inUse:%d", pMsg, ahandle,
taosMsg[msgType], epSet->numOfEps, epSet->inUse); taosMsg[msgType], epSet->numOfEps, epSet->inUse);
return TSDB_CODE_RPC_REDIRECT; code = TSDB_CODE_RPC_REDIRECT;
goto PROCESS_WRITE_REQ_END;
} }
if (tsMworker.writeMsgFp[msgType] == NULL) { if (tsMworker.writeMsgFp[msgType] == NULL) {
mError("msg:%p, app:%p type:%s not processed", pMsg, ahandle, taosMsg[msgType]); mError("msg:%p, app:%p type:%s not processed", pMsg, ahandle, taosMsg[msgType]);
return TSDB_CODE_MND_MSG_NOT_PROCESSED; code = TSDB_CODE_MND_MSG_NOT_PROCESSED;
goto PROCESS_WRITE_REQ_END;
} }
return (*tsMworker.writeMsgFp[msgType])(pMsg); code = (*tsMworker.writeMsgFp[msgType])(pMsg);
PROCESS_WRITE_REQ_END:
mnodeSendRsp(pMsg, code);
} }
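
With the pool-level endFp gone, each processor is now a void function that funnels every early exit through one label and answers the client itself via mnodeSendRsp(pMsg, code). A small stand-alone sketch of that single-exit goto style (generic C, not the mnode code; handle_request and send_rsp are invented names):

#include <stdio.h>

enum { OK = 0, ERR_EMPTY = 1, ERR_UNSUPPORTED = 2 };

/* stand-in for mnodeSendRsp(pMsg, code): reached exactly once per request */
static void send_rsp(int code) { printf("rsp code:%d\n", code); }

static void handle_request(const char *payload, int supported) {
  int code = OK;

  if (payload == NULL) {            /* early exits only set the code ... */
    code = ERR_EMPTY;
    goto HANDLE_REQUEST_END;
  }
  if (!supported) {
    code = ERR_UNSUPPORTED;
    goto HANDLE_REQUEST_END;
  }

  printf("processing: %s\n", payload);   /* the normal path falls through as well */

HANDLE_REQUEST_END:                 /* ... and every path sends the response here */
  send_rsp(code);
}

int main(void) {
  handle_request(NULL, 1);          /* -> rsp code:1 */
  handle_request("create db", 0);   /* -> rsp code:2 */
  handle_request("create db", 1);   /* -> processing + rsp code:0 */
  return 0;
}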
static int32_t mnodeProcessReadReq(void* unused, SMnMsg *pMsg, int32_t qtype) { static void mnodeProcessReadReq(SMnMsg *pMsg, void *unused) {
int32_t msgType = pMsg->rpcMsg.msgType; int32_t msgType = pMsg->rpcMsg.msgType;
void *ahandle = pMsg->rpcMsg.ahandle; void *ahandle = pMsg->rpcMsg.ahandle;
int32_t code = 0;
if (pMsg->rpcMsg.pCont == NULL) { if (pMsg->rpcMsg.pCont == NULL) {
mError("msg:%p, app:%p type:%s in mread queue, content is null", pMsg, ahandle, taosMsg[msgType]); mError("msg:%p, app:%p type:%s in mread queue, content is null", pMsg, ahandle, taosMsg[msgType]);
return TSDB_CODE_MND_INVALID_MSG_LEN; code = TSDB_CODE_MND_INVALID_MSG_LEN;
goto PROCESS_READ_REQ_END;
} }
if (!mnodeIsMaster()) { if (!mnodeIsMaster()) {
SMnRsp *rpcRsp = &pMsg->rpcRsp; SMnRsp *rpcRsp = &pMsg->rpcRsp;
SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
if (!epSet) { if (!epSet) {
return TSDB_CODE_MND_OUT_OF_MEMORY; code = TSDB_CODE_MND_OUT_OF_MEMORY;
goto PROCESS_READ_REQ_END;
} }
mnodeGetMnodeEpSetForShell(epSet, true); mnodeGetMnodeEpSetForShell(epSet, true);
rpcRsp->rsp = epSet; rpcRsp->rsp = epSet;
...@@ -341,25 +345,32 @@ static int32_t mnodeProcessReadReq(void* unused, SMnMsg *pMsg, int32_t qtype) { ...@@ -341,25 +345,32 @@ static int32_t mnodeProcessReadReq(void* unused, SMnMsg *pMsg, int32_t qtype) {
mDebug("msg:%p, app:%p type:%s in mread queue is redirected, numOfEps:%d inUse:%d", pMsg, ahandle, taosMsg[msgType], mDebug("msg:%p, app:%p type:%s in mread queue is redirected, numOfEps:%d inUse:%d", pMsg, ahandle, taosMsg[msgType],
epSet->numOfEps, epSet->inUse); epSet->numOfEps, epSet->inUse);
return TSDB_CODE_RPC_REDIRECT; code = TSDB_CODE_RPC_REDIRECT;
goto PROCESS_READ_REQ_END;
} }
if (tsMworker.readMsgFp[msgType] == NULL) { if (tsMworker.readMsgFp[msgType] == NULL) {
mError("msg:%p, app:%p type:%s in mread queue, not processed", pMsg, ahandle, taosMsg[msgType]); mError("msg:%p, app:%p type:%s in mread queue, not processed", pMsg, ahandle, taosMsg[msgType]);
return TSDB_CODE_MND_MSG_NOT_PROCESSED; code = TSDB_CODE_MND_MSG_NOT_PROCESSED;
goto PROCESS_READ_REQ_END;
} }
mTrace("msg:%p, app:%p type:%s will be processed in mread queue", pMsg, ahandle, taosMsg[msgType]); mTrace("msg:%p, app:%p type:%s will be processed in mread queue", pMsg, ahandle, taosMsg[msgType]);
return (*tsMworker.readMsgFp[msgType])(pMsg); code = (*tsMworker.readMsgFp[msgType])(pMsg);
PROCESS_READ_REQ_END:
mnodeSendRsp(pMsg, code);
} }
static int32_t mnodeProcessPeerReq(void *unused, SMnMsg *pMsg, int32_t qtype) { static void mnodeProcessPeerReq(SMnMsg *pMsg, void *unused) {
int32_t msgType = pMsg->rpcMsg.msgType; int32_t msgType = pMsg->rpcMsg.msgType;
void * ahandle = pMsg->rpcMsg.ahandle; void *ahandle = pMsg->rpcMsg.ahandle;
int32_t code = 0;
if (pMsg->rpcMsg.pCont == NULL) { if (pMsg->rpcMsg.pCont == NULL) {
mError("msg:%p, ahandle:%p type:%s in mpeer queue, content is null", pMsg, ahandle, taosMsg[msgType]); mError("msg:%p, ahandle:%p type:%s in mpeer queue, content is null", pMsg, ahandle, taosMsg[msgType]);
return TSDB_CODE_MND_INVALID_MSG_LEN; code = TSDB_CODE_MND_INVALID_MSG_LEN;
goto PROCESS_PEER_REQ_END;
} }
if (!mnodeIsMaster()) { if (!mnodeIsMaster()) {
...@@ -372,24 +383,29 @@ static int32_t mnodeProcessPeerReq(void *unused, SMnMsg *pMsg, int32_t qtype) { ...@@ -372,24 +383,29 @@ static int32_t mnodeProcessPeerReq(void *unused, SMnMsg *pMsg, int32_t qtype) {
mDebug("msg:%p, ahandle:%p type:%s in mpeer queue is redirected, numOfEps:%d inUse:%d", pMsg, ahandle, mDebug("msg:%p, ahandle:%p type:%s in mpeer queue is redirected, numOfEps:%d inUse:%d", pMsg, ahandle,
taosMsg[msgType], epSet->numOfEps, epSet->inUse); taosMsg[msgType], epSet->numOfEps, epSet->inUse);
return TSDB_CODE_RPC_REDIRECT; code = TSDB_CODE_RPC_REDIRECT;
goto PROCESS_PEER_REQ_END;
} }
if (tsMworker.peerReqFp[msgType] == NULL) { if (tsMworker.peerReqFp[msgType] == NULL) {
mError("msg:%p, ahandle:%p type:%s in mpeer queue, not processed", pMsg, ahandle, taosMsg[msgType]); mError("msg:%p, ahandle:%p type:%s in mpeer queue, not processed", pMsg, ahandle, taosMsg[msgType]);
return TSDB_CODE_MND_MSG_NOT_PROCESSED; code = TSDB_CODE_MND_MSG_NOT_PROCESSED;
goto PROCESS_PEER_REQ_END;
} }
return (*tsMworker.peerReqFp[msgType])(pMsg); code = (*tsMworker.peerReqFp[msgType])(pMsg);
PROCESS_PEER_REQ_END:
mnodeSendRsp(pMsg, code);
} }
static int32_t mnodeProcessPeerRsp(void *ahandle, SMnMsg *pMsg, int32_t qtype) { static void mnodeProcessPeerRsp(SMnMsg *pMsg, void *unused) {
int32_t msgType = pMsg->rpcMsg.msgType; int32_t msgType = pMsg->rpcMsg.msgType;
SRpcMsg *pRpcMsg = &pMsg->rpcMsg; SRpcMsg *pRpcMsg = &pMsg->rpcMsg;
if (!mnodeIsMaster()) { if (!mnodeIsMaster()) {
mError("msg:%p, ahandle:%p type:%s not processed for not master", pRpcMsg, pRpcMsg->ahandle, taosMsg[msgType]); mError("msg:%p, ahandle:%p type:%s not processed for not master", pRpcMsg, pRpcMsg->ahandle, taosMsg[msgType]);
return 0; mnodeCleanupMsg(pMsg);
return;
} }
if (tsMworker.peerRspFp[msgType]) { if (tsMworker.peerRspFp[msgType]) {
...@@ -398,7 +414,7 @@ static int32_t mnodeProcessPeerRsp(void *ahandle, SMnMsg *pMsg, int32_t qtype) { ...@@ -398,7 +414,7 @@ static int32_t mnodeProcessPeerRsp(void *ahandle, SMnMsg *pMsg, int32_t qtype) {
mError("msg:%p, ahandle:%p type:%s is not processed", pRpcMsg, pRpcMsg->ahandle, taosMsg[msgType]); mError("msg:%p, ahandle:%p type:%s is not processed", pRpcMsg, pRpcMsg->ahandle, taosMsg[msgType]);
} }
return 0; mnodeCleanupMsg(pMsg);
} }
int32_t mnodeInitWorker() { int32_t mnodeInitWorker() {
...@@ -406,20 +422,16 @@ int32_t mnodeInitWorker() { ...@@ -406,20 +422,16 @@ int32_t mnodeInitWorker() {
SWorkerPool *pPool = &tsMworker.write; SWorkerPool *pPool = &tsMworker.write;
pPool->name = "mnode-write"; pPool->name = "mnode-write";
pPool->startFp = (ProcessStartFp)mnodeProcessWriteReq;
pPool->endFp = (ProcessEndFp)mnodeSendRpcRsp;
pPool->min = 1; pPool->min = 1;
pPool->max = 1; pPool->max = 1;
if (tWorkerInit(pPool) != 0) { if (tWorkerInit(pPool) != 0) {
return TSDB_CODE_MND_OUT_OF_MEMORY; return TSDB_CODE_MND_OUT_OF_MEMORY;
} else { } else {
tsMworker.writeQ = tWorkerAllocQueue(pPool, NULL); tsMworker.writeQ = tWorkerAllocQueue(pPool, NULL, (FProcessItem)mnodeProcessWriteReq);
} }
pPool = &tsMworker.read; pPool = &tsMworker.read;
pPool->name = "mnode-read"; pPool->name = "mnode-read";
pPool->startFp = (ProcessStartFp)mnodeProcessReadReq;
pPool->endFp = (ProcessEndFp)mnodeSendRpcRsp;
pPool->min = 2; pPool->min = 2;
pPool->max = (int32_t)(tsNumOfCores * tsNumOfThreadsPerCore / 2); pPool->max = (int32_t)(tsNumOfCores * tsNumOfThreadsPerCore / 2);
pPool->max = MAX(2, pPool->max); pPool->max = MAX(2, pPool->max);
...@@ -427,31 +439,27 @@ int32_t mnodeInitWorker() { ...@@ -427,31 +439,27 @@ int32_t mnodeInitWorker() {
if (tWorkerInit(pPool) != 0) { if (tWorkerInit(pPool) != 0) {
return TSDB_CODE_MND_OUT_OF_MEMORY; return TSDB_CODE_MND_OUT_OF_MEMORY;
} else { } else {
tsMworker.readQ = tWorkerAllocQueue(pPool, NULL); tsMworker.readQ = tWorkerAllocQueue(pPool, NULL, (FProcessItem)mnodeProcessReadReq);
} }
pPool = &tsMworker.peerReq; pPool = &tsMworker.peerReq;
pPool->name = "mnode-peer-req"; pPool->name = "mnode-peer-req";
pPool->startFp = (ProcessStartFp)mnodeProcessPeerReq;
pPool->endFp = (ProcessEndFp)mnodeSendRpcRsp;
pPool->min = 1; pPool->min = 1;
pPool->max = 1; pPool->max = 1;
if (tWorkerInit(pPool) != 0) { if (tWorkerInit(pPool) != 0) {
return TSDB_CODE_MND_OUT_OF_MEMORY; return TSDB_CODE_MND_OUT_OF_MEMORY;
} else { } else {
tsMworker.peerReqQ = tWorkerAllocQueue(pPool, NULL); tsMworker.peerReqQ = tWorkerAllocQueue(pPool, NULL, (FProcessItem)mnodeProcessPeerReq);
} }
pPool = &tsMworker.peerRsp; pPool = &tsMworker.peerRsp;
pPool->name = "mnode-peer-rsp"; pPool->name = "mnode-peer-rsp";
pPool->startFp = (ProcessStartFp)mnodeProcessPeerRsp;
pPool->endFp = (ProcessEndFp)mnodeProcessPeerRspEnd;
pPool->min = 1; pPool->min = 1;
pPool->max = 1; pPool->max = 1;
if (tWorkerInit(pPool) != 0) { if (tWorkerInit(pPool) != 0) {
return TSDB_CODE_MND_OUT_OF_MEMORY; return TSDB_CODE_MND_OUT_OF_MEMORY;
} else { } else {
tsMworker.peerRspQ = tWorkerAllocQueue(pPool, NULL); tsMworker.peerRspQ = tWorkerAllocQueue(pPool, NULL, (FProcessItem)mnodeProcessPeerRsp);
} }
mInfo("mnode worker is initialized"); mInfo("mnode worker is initialized");
......
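
mnodeInitWorker now only sizes each pool and binds the processing callback per queue through tWorkerAllocQueue(pPool, ahandle, (FProcessItem)fp), instead of setting startFp/endFp on the pool. As a rough, self-contained approximation of a queue that owns its processing callback (plain pthreads; mini_queue, mini_put and mini_worker are invented names, not the tworker API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (*ProcessItemFp)(void *item, void *ahandle);

typedef struct node { struct node *next; void *item; } node;

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t  notEmpty;
  node           *head, *tail;
  ProcessItemFp   fp;        /* callback bound to this queue, like tWorkerAllocQueue */
  void           *ahandle;
  int             stop;
  pthread_t       thread;
} mini_queue;

static void *mini_worker(void *arg) {
  mini_queue *q = arg;
  for (;;) {
    pthread_mutex_lock(&q->lock);
    while (q->head == NULL && !q->stop) pthread_cond_wait(&q->notEmpty, &q->lock);
    if (q->head == NULL && q->stop) { pthread_mutex_unlock(&q->lock); break; }
    node *n = q->head;
    q->head = n->next;
    if (q->head == NULL) q->tail = NULL;
    pthread_mutex_unlock(&q->lock);
    q->fp(n->item, q->ahandle);    /* the queue's own callback processes the item */
    free(n);
  }
  return NULL;
}

static void mini_put(mini_queue *q, void *item) {
  node *n = malloc(sizeof(*n));
  n->item = item; n->next = NULL;
  pthread_mutex_lock(&q->lock);
  if (q->tail) q->tail->next = n; else q->head = n;
  q->tail = n;
  pthread_cond_signal(&q->notEmpty);
  pthread_mutex_unlock(&q->lock);
}

static void print_item(void *item, void *ahandle) {
  (void)ahandle;
  printf("processed: %s\n", (const char *)item);
}

int main(void) {
  mini_queue q = {.fp = print_item};
  pthread_mutex_init(&q.lock, NULL);
  pthread_cond_init(&q.notEmpty, NULL);
  pthread_create(&q.thread, NULL, mini_worker, &q);

  mini_put(&q, "write-msg-1");
  mini_put(&q, "write-msg-2");

  pthread_mutex_lock(&q.lock);     /* drain remaining items, then stop the worker */
  q.stop = 1;
  pthread_cond_signal(&q.notEmpty);
  pthread_mutex_unlock(&q.lock);
  pthread_join(q.thread, NULL);
  return 0;
}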
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
#ifndef _TD_VNODE_COMMIT_H_ #ifndef _TD_VNODE_COMMIT_H_
#define _TD_VNODE_COMMIT_H_ #define _TD_VNODE_COMMIT_H_
#include "vnodeInt.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
......
...@@ -16,19 +16,14 @@ ...@@ -16,19 +16,14 @@
#ifndef _TD_VNODE_INT_H_ #ifndef _TD_VNODE_INT_H_
#define _TD_VNODE_INT_H_ #define _TD_VNODE_INT_H_
#include "os.h" #include "vnode.h"
#include "amalloc.h" #include "amalloc.h"
#include "meta.h" #include "meta.h"
#include "sync.h" #include "sync.h"
#include "taosmsg.h"
#include "tglobal.h"
#include "tlog.h" #include "tlog.h"
#include "tq.h" #include "tq.h"
#include "tqueue.h"
#include "trpc.h"
#include "tsdb.h" #include "tsdb.h"
#include "tworker.h"
#include "vnode.h"
#include "wal.h" #include "wal.h"
#ifdef __cplusplus #ifdef __cplusplus
...@@ -44,71 +39,16 @@ extern int32_t vDebugFlag; ...@@ -44,71 +39,16 @@ extern int32_t vDebugFlag;
#define vDebug(...) { if (vDebugFlag & DEBUG_DEBUG) { taosPrintLog("VND ", vDebugFlag, __VA_ARGS__); }} #define vDebug(...) { if (vDebugFlag & DEBUG_DEBUG) { taosPrintLog("VND ", vDebugFlag, __VA_ARGS__); }}
#define vTrace(...) { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND ", vDebugFlag, __VA_ARGS__); }} #define vTrace(...) { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND ", vDebugFlag, __VA_ARGS__); }}
typedef struct STsdbCfg { typedef struct SVnode {
int32_t cacheBlockSize; // MB int32_t vgId;
int32_t totalBlocks; SVnodeCfg cfg;
int32_t daysPerFile;
int32_t daysToKeep0;
int32_t daysToKeep1;
int32_t daysToKeep2;
int32_t minRowsPerFileBlock;
int32_t maxRowsPerFileBlock;
uint8_t precision; // time resolution
int8_t compression;
int8_t cacheLastRow;
int8_t update;
} STsdbCfg;
typedef struct SMetaCfg {
} SMetaCfg;
typedef struct SVnodeCfg {
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t dropped;
int8_t quorum;
SWalCfg wal;
STsdbCfg tsdb;
SMetaCfg meta;
SSyncCluster sync;
} SVnodeCfg;
typedef struct {
int32_t vgId; // global vnode group ID
int32_t refCount; // reference count
SMemAllocator *allocator;
SMeta *pMeta; SMeta *pMeta;
STsdb *pTsdb; STsdb *pTsdb;
STQ *pTQ; STQ *pTQ;
SWal *pWal; SWal *pWal;
void *pQuery;
SSyncNode *pSync; SSyncNode *pSync;
taos_queue pWriteQ; // write queue
taos_queue pQueryQ; // read query queue
taos_queue pFetchQ; // read fetch/cancel queue
SVnodeCfg cfg;
SSyncServerState term;
int64_t queuedWMsgSize;
int32_t queuedWMsg;
int32_t queuedRMsg;
int32_t numOfQHandle; // current initialized and existed query handle in current dnode
int8_t role;
int8_t accessState;
int8_t dropped;
int8_t status;
pthread_mutex_t statusMutex;
} SVnode; } SVnode;
typedef struct {
int32_t len;
void *rsp;
void *qhandle; // used by query and retrieve msg
} SVnRsp;
void vnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg);
void vnodeSendMsgToMnode(struct SRpcMsg *rpcMsg);
void vnodeGetDnodeEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
void vnodeReportStartup(char *name, char *desc);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#ifndef _TD_VNODE_MEM_ALLOCATOR_H_ #ifndef _TD_VNODE_MEM_ALLOCATOR_H_
#define _TD_VNODE_MEM_ALLOCATOR_H_ #define _TD_VNODE_MEM_ALLOCATOR_H_
#include "os.h" #include "vnodeInt.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_READ_H_
#define _TD_VNODE_READ_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
void vnodeProcessReadMsg(SVnode *pVnode, SVnodeMsg *pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_READ_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_WRITE_H_
#define _TD_VNODE_WRITE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
void vnodeProcessWriteMsg(SVnode* pVnode, SVnodeMsg* pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_WRITE_H_*/
...@@ -13,21 +13,21 @@ ...@@ -13,21 +13,21 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef _TD_VNODE_CFG_H_ #define _DEFAULT_SOURCE
#define _TD_VNODE_CFG_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h" #include "vnodeInt.h"
int32_t vnodeReadCfg(int32_t vgId, SVnodeCfg *pCfg); int32_t vnodeInit() { return 0; }
int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg); void vnodeCleanup() {}
int32_t vnodeReadState(int32_t vgId, SSyncServerState *pState);
int32_t vnodeSaveState(int32_t vgid, SSyncServerState *pState); int32_t vnodeGetStatistics(SVnode *pVnode, SVnodeStatisic *pStat) { return 0; }
int32_t vnodeGetStatus(SVnode *pVnode, SVnodeStatus *pStatus) { return 0; }
#ifdef __cplusplus SVnode *vnodeOpen(int32_t vgId, const char *path) { return NULL; }
} void vnodeClose(SVnode *pVnode) {}
#endif int32_t vnodeAlter(SVnode *pVnode, const SVnodeCfg *pCfg) { return 0; }
SVnode *vnodeCreate(int32_t vgId, const char *path, const SVnodeCfg *pCfg) { return NULL; }
int32_t vnodeDrop(SVnode *pVnode) { return 0; }
int32_t vnodeCompact(SVnode *pVnode) { return 0; }
int32_t vnodeSync(SVnode *pVnode) { return 0; }
#endif /*_TD_VNODE_CFG_H_*/ int32_t vnodeProcessMsg(SVnode *pVnode, SVnodeMsg *pMsg) { return 0; }
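
The new vnode.c only stubs the lifecycle out, but the intended call order is visible from the declarations: vnodeInit once per process, then create or open individual vnodes, feed them messages, and tear down in reverse. A compilable sketch of that order using local stand-ins (the struct layouts below are placeholders, not the real SVnode, SVnodeCfg or SVnodeMsg, and the bodies only mirror the stubs above):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int32_t cacheBlockSize; } SVnodeCfg;   /* placeholder */
typedef struct { int32_t msgType; } SVnodeMsg;          /* placeholder */
typedef struct { int32_t vgId; SVnodeCfg cfg; } SVnode; /* placeholder */

static int32_t vnodeInit(void) { return 0; }            /* module init, once per dnode */
static void    vnodeCleanup(void) {}

static SVnode *vnodeCreate(int32_t vgId, const char *path, const SVnodeCfg *pCfg) {
  (void)path;
  SVnode *pVnode = calloc(1, sizeof(SVnode));
  if (pVnode) { pVnode->vgId = vgId; pVnode->cfg = *pCfg; }
  return pVnode;
}

static void vnodeClose(SVnode *pVnode) { free(pVnode); }

static int32_t vnodeProcessMsg(SVnode *pVnode, SVnodeMsg *pMsg) {
  printf("vgId:%d got msgType:%d\n", pVnode->vgId, pMsg->msgType);
  return 0;
}

int main(void) {
  if (vnodeInit() != 0) return 1;

  SVnodeCfg cfg = {.cacheBlockSize = 16};
  SVnode *pVnode = vnodeCreate(2, "/tmp/vnode2", &cfg);
  if (pVnode == NULL) { vnodeCleanup(); return 1; }

  SVnodeMsg msg = {.msgType = 1};
  vnodeProcessMsg(pVnode, &msg);   /* dispatch work into the vnode */

  vnodeClose(pVnode);              /* per-vnode teardown ... */
  vnodeCleanup();                  /* ... then module cleanup */
  return 0;
}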
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "vnodeRead.h"
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "vnodeWrite.h"
...@@ -6,4 +6,4 @@ target_sources(VMATest ...@@ -6,4 +6,4 @@ target_sources(VMATest
"vnodeMemAllocatorTest.cpp" "vnodeMemAllocatorTest.cpp"
) )
target_include_directories(VMATest PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../inc") target_include_directories(VMATest PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../inc")
target_link_libraries(VMATest os gtest_main) target_link_libraries(VMATest os gtest_main vnode)
\ No newline at end of file \ No newline at end of file
...@@ -1841,7 +1841,7 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt ...@@ -1841,7 +1841,7 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->command = TSDB_SQL_MULTI_META; pCmd->command = TSDB_SQL_MULTI_META;
pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META; pCmd->msgType = TSDB_MSG_TYPE_TABLES_META;
int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
char *str = (char *)pNameList; char *str = (char *)pNameList;
......
...@@ -403,10 +403,10 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 ...@@ -403,10 +403,10 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64
// connection type is application specific. // connection type is application specific.
// for TDengine, all the query, show commands shall have TCP connection // for TDengine, all the query, show commands shall have TCP connection
char type = pMsg->msgType; char type = pMsg->msgType;
if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_SHOW_RETRIEVE
|| type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_STABLE_VGROUP
|| type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META || type == TSDB_MSG_TYPE_TABLES_META || type == TSDB_MSG_TYPE_TABLE_META
|| type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_CM_ALTER_TABLE) || type == TSDB_MSG_TYPE_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_ALTER_TABLE)
pContext->connType = RPC_CONN_TCPC; pContext->connType = RPC_CONN_TCPC;
pContext->rid = taosAddRef(tsRpcRefId, pContext); pContext->rid = taosAddRef(tsRpcRefId, pContext);
......
...@@ -15,9 +15,9 @@ ...@@ -15,9 +15,9 @@
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "dnodeCheck.h" #include "dnodeCheck.h"
#include "dnodeEps.h" #include "dnodeConfig.h"
#include "dnodeMsg.h" #include "dnodeDnode.h"
#include "dnodeTrans.h" #include "dnodeTransport.h"
#include "mnode.h" #include "mnode.h"
#include "sync.h" #include "sync.h"
#include "tcache.h" #include "tcache.h"
...@@ -54,13 +54,7 @@ static void dnodeReportStartupFinished(char *name, char *desc) { ...@@ -54,13 +54,7 @@ static void dnodeReportStartupFinished(char *name, char *desc) {
void dnodeGetStartup(SStartupStep *pStep) { memcpy(pStep, &tsDnode.startup, sizeof(SStartupStep)); } void dnodeGetStartup(SStartupStep *pStep) { memcpy(pStep, &tsDnode.startup, sizeof(SStartupStep)); }
static int32_t dnodeInitVnode() { static int32_t dnodeInitVnode() {
SVnodePara para; return vnodeInit();
para.fp.GetDnodeEp = dnodeGetEp;
para.fp.SendMsgToDnode = dnodeSendMsgToDnode;
para.fp.SendMsgToMnode = dnodeSendMsgToMnode;
para.fp.ReportStartup = dnodeReportStartup;
return vnodeInit(para);
} }
static int32_t dnodeInitMnode() { static int32_t dnodeInitMnode() {
...@@ -200,3 +194,11 @@ void dnodeCleanup() { ...@@ -200,3 +194,11 @@ void dnodeCleanup() {
tsDnode.steps = NULL; tsDnode.steps = NULL;
} }
} }
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessMgmtMsg;
// tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessMgmtMsg;
\ No newline at end of file
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "dnodeMsg.h" #include "dnodeDnode.h"
#include "dnodeEps.h" #include "dnodeConfig.h"
#include "mnode.h" #include "mnode.h"
#include "tthread.h" #include "tthread.h"
#include "ttime.h" #include "ttime.h"
...@@ -52,9 +52,9 @@ static void dnodeSendStatusMsg() { ...@@ -52,9 +52,9 @@ static void dnodeSendStatusMsg() {
tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN); tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN);
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN); tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
vnodeGetStatus(pStatus); // vnodeGetStatus(NULL, pStatus);
contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad); // contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad);
pStatus->openVnodes = htons(pStatus->openVnodes); // pStatus->openVnodes = htons(pStatus->openVnodes);
SRpcMsg rpcMsg = {.ahandle = NULL, .pCont = pStatus, .contLen = contLen, .msgType = TSDB_MSG_TYPE_DM_STATUS}; SRpcMsg rpcMsg = {.ahandle = NULL, .pCont = pStatus, .contLen = contLen, .msgType = TSDB_MSG_TYPE_DM_STATUS};
...@@ -79,7 +79,7 @@ void dnodeProcessStatusRsp(SRpcMsg *pMsg) { ...@@ -79,7 +79,7 @@ void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
return; return;
} }
vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes); // vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes);
SDnodeEps *eps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess)); SDnodeEps *eps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess));
eps->dnodeNum = htonl(eps->dnodeNum); eps->dnodeNum = htonl(eps->dnodeNum);
......
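
dnodeSendStatusMsg and dnodeProcessStatusRsp keep the usual RPC convention here: multi-byte integers are converted to network byte order right before the buffer is handed to RPC (htons/htonl) and converted back right after receipt. A tiny self-contained reminder of that round trip (generic C; SToyStatus is an invented struct, not SStatusMsg):

#include <arpa/inet.h>   /* htons/htonl/ntohs/ntohl */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint16_t openVnodes;   /* sent as 16-bit, hence htons */
  uint32_t dnodeNum;     /* sent as 32-bit, hence htonl */
} SToyStatus;

int main(void) {
  SToyStatus s = {.openVnodes = 3, .dnodeNum = 2};

  /* sender: convert to network byte order just before sending */
  s.openVnodes = htons(s.openVnodes);
  s.dnodeNum   = htonl(s.dnodeNum);

  /* receiver: convert back to host order before using the fields */
  uint16_t openVnodes = ntohs(s.openVnodes);
  uint32_t dnodeNum   = ntohl(s.dnodeNum);

  printf("openVnodes:%u dnodeNum:%u\n", openVnodes, dnodeNum);
  return 0;
}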
...@@ -20,24 +20,107 @@ ...@@ -20,24 +20,107 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "dnodeTrans.h" #include "dnodeTransport.h"
#include "dnodeEps.h" #include "dnodeConfig.h"
#include "dnodeMsg.h" #include "dnodeDnode.h"
#include "mnode.h" #include "mnode.h"
#include "vnode.h" #include "vnode.h"
typedef void (*RpcMsgFp)(SRpcMsg *pMsg); typedef void (*MsgFp)(SRpcMsg *pMsg);
static struct { static struct {
void *serverRpc; void *serverRpc;
void *clientRpc; void *clientRpc;
void *shellRpc; void *shellRpc;
int32_t queryReqNum; MsgFp msgFp[TSDB_MSG_TYPE_MAX];
int32_t submitReqNum;
RpcMsgFp peerMsgFp[TSDB_MSG_TYPE_MAX];
RpcMsgFp shellMsgFp[TSDB_MSG_TYPE_MAX];
} tsTrans; } tsTrans;
static void dnodeInitMsgFp() {
// msg from client to dnode
tsTrans.msgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_TABLE] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_TABLE] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_TABLE] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_TABLE_META] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_TABLES_META] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_STABLE_VGROUP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_CONNECT] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_DISCONNECT] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_ACK] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_MQ_RESET] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeProcessStartupReq;
// msg from client to mnode
tsTrans.msgFp[TSDB_MSG_TYPE_CONNECT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_ACCT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_ACCT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_ACCT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_USER] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_USER] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_USER] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_DNODE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_DNODE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_USE_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SYNC_DB] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_TOPIC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_TOPIC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_TOPIC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_FUNCTION] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_FUNCTION] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_FUNCTION] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_STABLE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_STABLE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_STABLE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_KILL_QUERY] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_KILL_CONN] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_HEARTBEAT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SHOW] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SHOW_RETRIEVE] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SHOW_RETRIEVE_FUNC] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE] = mnodeProcessMsg;
// message from mnode to dnode
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_STABLE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_STABLE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_STABLE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_STABLE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_STABLE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_STABLE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_ALTER_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SYNC_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_SYNC_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE_IN] = vnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_COMPACT_VNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_MNODE_IN] = dnodeProcessCreateMnodeReq;
tsTrans.msgFp[TSDB_MSG_TYPE_CREATE_MNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_MNODE_IN] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_DROP_MNODE_IN_RSP] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE_IN] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_CONFIG_DNODE_IN_RSP] = mnodeProcessMsg;
// message from dnode to mnode
tsTrans.msgFp[TSDB_MSG_TYPE_DM_AUTH] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_AUTH_RSP] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_GRANT] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_GRANT_RSP] = NULL;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_STATUS] = mnodeProcessMsg;
tsTrans.msgFp[TSDB_MSG_TYPE_DM_STATUS_RSP] = dnodeProcessStatusRsp;
}
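
dnodeInitMsgFp collapses the old peerMsgFp/shellMsgFp pair into one msgFp table indexed by message type, and the dispatchers below simply look the handler up and reject the message when the slot is empty. A compact stand-alone sketch of that table-driven routing (the handler names and message ids here are invented, not the TDengine types):

#include <stdio.h>

enum { MSG_SUBMIT, MSG_QUERY, MSG_CREATE_DB, MSG_MAX };

typedef struct { int msgType; } Msg;
typedef void (*MsgFp)(Msg *pMsg);

static void vnodeHandle(Msg *pMsg) { printf("vnode handles %d\n", pMsg->msgType); }
static void mnodeHandle(Msg *pMsg) { printf("mnode handles %d\n", pMsg->msgType); }

static MsgFp msgFp[MSG_MAX];

static void initMsgFp(void) {
  msgFp[MSG_SUBMIT]    = vnodeHandle;   /* data path -> vnode */
  msgFp[MSG_QUERY]     = vnodeHandle;
  msgFp[MSG_CREATE_DB] = mnodeHandle;   /* DDL path  -> mnode */
}

static void dispatch(Msg *pMsg) {
  if (pMsg->msgType < 0 || pMsg->msgType >= MSG_MAX || msgFp[pMsg->msgType] == NULL) {
    printf("msg %d not processed\n", pMsg->msgType);   /* same "not processed" branch */
    return;
  }
  (*msgFp[pMsg->msgType])(pMsg);
}

int main(void) {
  initMsgFp();
  Msg a = {MSG_SUBMIT}, b = {MSG_CREATE_DB}, c = {MSG_MAX};
  dispatch(&a);
  dispatch(&b);
  dispatch(&c);   /* unregistered or out-of-range type is rejected */
  return 0;
}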
static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0}; SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0};
int32_t msgType = pMsg->msgType; int32_t msgType = pMsg->msgType;
...@@ -61,7 +144,7 @@ static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { ...@@ -61,7 +144,7 @@ static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
return; return;
} }
RpcMsgFp fp = tsTrans.peerMsgFp[msgType]; MsgFp fp = tsTrans.msgFp[msgType];
if (fp != NULL) { if (fp != NULL) {
dTrace("RPC %p, peer req:%s will be processed", pMsg->handle, taosMsg[msgType]); dTrace("RPC %p, peer req:%s will be processed", pMsg->handle, taosMsg[msgType]);
(*fp)(pMsg); (*fp)(pMsg);
...@@ -74,35 +157,6 @@ static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { ...@@ -74,35 +157,6 @@ static void dnodeProcessPeerReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
} }
static int32_t dnodeInitServer() { static int32_t dnodeInitServer() {
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeReq;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeProcessCreateMnodeReq;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_AUTH] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_GRANT] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_STATUS] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MQ_CONNECT] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MQ_DISCONNECT] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MQ_ACK] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MQ_RESET] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessMsg;
SRpcInit rpcInit; SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = tsDnodeDnodePort; rpcInit.localPort = tsDnodeDnodePort;
...@@ -145,7 +199,7 @@ static void dnodeProcessPeerRsp(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { ...@@ -145,7 +199,7 @@ static void dnodeProcessPeerRsp(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
dnodeUpdateMnodeEps(pEpSet); dnodeUpdateMnodeEps(pEpSet);
} }
RpcMsgFp fp = tsTrans.peerMsgFp[msgType]; MsgFp fp = tsTrans.msgFp[msgType];
if (fp != NULL) { if (fp != NULL) {
dTrace("RPC %p, peer rsp:%s will be processed", pMsg->handle, taosMsg[msgType]); dTrace("RPC %p, peer rsp:%s will be processed", pMsg->handle, taosMsg[msgType]);
(*fp)(pMsg); (*fp)(pMsg);
...@@ -157,28 +211,6 @@ static void dnodeProcessPeerRsp(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { ...@@ -157,28 +211,6 @@ static void dnodeProcessPeerRsp(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
} }
static int32_t dnodeInitClient() { static int32_t dnodeInitClient() {
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_AUTH_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_GRANT_RSP] = mnodeProcessMsg;
tsTrans.peerMsgFp[TSDB_MSG_TYPE_DM_STATUS_RSP] = dnodeProcessStatusRsp;
char secret[TSDB_KEY_LEN] = "secret"; char secret[TSDB_KEY_LEN] = "secret";
SRpcInit rpcInit; SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInit, 0, sizeof(rpcInit));
...@@ -234,14 +266,7 @@ static void dnodeProcessShellReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { ...@@ -234,14 +266,7 @@ static void dnodeProcessShellReq(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
return; return;
} }
if (msgType == TSDB_MSG_TYPE_QUERY) { MsgFp fp = tsTrans.msgFp[msgType];
atomic_fetch_add_32(&tsTrans.queryReqNum, 1);
} else if (msgType == TSDB_MSG_TYPE_SUBMIT) {
atomic_fetch_add_32(&tsTrans.submitReqNum, 1);
} else {
}
RpcMsgFp fp = tsTrans.shellMsgFp[msgType];
if (fp != NULL) { if (fp != NULL) {
dTrace("RPC %p, shell req:%s will be processed", pMsg->handle, taosMsg[msgType]); dTrace("RPC %p, shell req:%s will be processed", pMsg->handle, taosMsg[msgType]);
(*fp)(pMsg); (*fp)(pMsg);
...@@ -299,54 +324,6 @@ static int32_t dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, c ...@@ -299,54 +324,6 @@ static int32_t dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, c
} }
static int32_t dnodeInitShell() { static int32_t dnodeInitShell() {
tsTrans.shellMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessMsg;
// the following message shall be treated as mnode write
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_USER] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_DROP_TABLE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TABLE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_ALTER_STREAM] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_KILL_QUERY] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_COMPACT_VNODE] = mnodeProcessMsg;
// the following message shall be treated as mnode query
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_CONNECT] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_USE_DB] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = mnodeProcessMsg;
tsTrans.shellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeProcessStartupReq;
int32_t numOfThreads = (int32_t)((tsNumOfCores * tsNumOfThreadsPerCore) / 2.0); int32_t numOfThreads = (int32_t)((tsNumOfCores * tsNumOfThreadsPerCore) / 2.0);
if (numOfThreads < 1) { if (numOfThreads < 1) {
numOfThreads = 1; numOfThreads = 1;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_MAIN_H_
#define _TD_VNODE_MAIN_H_
#include "vnodeInt.h"
#ifdef __cplusplus
extern "C" {
#endif
int32_t vnodeInitMain();
void vnodeCleanupMain();
SVnode *vnodeAcquireInAllState(int32_t vgId);
SVnode *vnodeAcquire(int32_t vgId);
void vnodeRelease(SVnode *pVnode);
int32_t vnodeCreateVnode(int32_t vgId, SVnodeCfg *pCfg);
int32_t vnodeAlterVnode(SVnode *pVnode, SVnodeCfg *pCfg);
int32_t vnodeDropVnode(SVnode *pVnode);
int32_t vnodeSyncVnode(SVnode *pVnode);
int32_t vnodeCompactVnode(SVnode *pVnode);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_MAIN_H_*/
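
vnodeMain.h exposes an acquire/release pair, which in the existing code guards a vnode with a reference count so it cannot be freed while a request is still using it. A minimal stand-alone sketch of that pattern (C11 atomics; ToyVnode, toyAcquire and toyRelease are invented names, not the real vnodeAcquire/vnodeRelease bodies):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int        vgId;
  atomic_int refCount;   /* how many callers currently hold the vnode */
  int        dropped;    /* set once a drop has been requested */
} ToyVnode;

static ToyVnode *toyAcquire(ToyVnode *pVnode) {
  if (pVnode == NULL || pVnode->dropped) return NULL;   /* refuse new users once dropped */
  atomic_fetch_add(&pVnode->refCount, 1);
  return pVnode;
}

static void toyRelease(ToyVnode *pVnode) {
  if (atomic_fetch_sub(&pVnode->refCount, 1) == 1) {    /* last holder frees it */
    printf("vgId:%d freed\n", pVnode->vgId);
    free(pVnode);
  }
}

int main(void) {
  ToyVnode *pVnode = calloc(1, sizeof(*pVnode));
  pVnode->vgId = 2;
  atomic_init(&pVnode->refCount, 1);       /* the management layer holds one reference */

  ToyVnode *pUse = toyAcquire(pVnode);     /* a request takes a reference while it runs */
  if (pUse) {
    printf("vgId:%d in use, ref:%d\n", pUse->vgId, atomic_load(&pUse->refCount));
    toyRelease(pUse);                      /* request done */
  }

  pVnode->dropped = 1;                     /* drop requested: no new acquires succeed */
  toyRelease(pVnode);                      /* owner drops its reference -> freed here */
  return 0;
}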
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_MGMT_H_
#define _TD_VNODE_MGMT_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
typedef struct {
SVnode *pVnode;
SRpcMsg rpcMsg;
char pCont[];
} SVnMgmtMsg;
int32_t vnodeInitMgmt();
void vnodeCleanupMgmt();
void vnodeProcessMgmtMsg(SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_MGMT_H_*/
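
SVnMgmtMsg ends in a flexible array member, pCont[], so the management message header and a copy of the RPC payload can live in one allocation. A short self-contained example of allocating and filling such a struct (generic C; the field names mirror the header above but ToyMgmtMsg and its content are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  int  msgType;
  int  contLen;
  char pCont[];          /* flexible array member: payload stored right after the header */
} ToyMgmtMsg;

static ToyMgmtMsg *toyBuildMsg(int msgType, const void *pCont, int contLen) {
  /* one allocation holds the fixed header plus contLen bytes of payload */
  ToyMgmtMsg *pMsg = malloc(sizeof(ToyMgmtMsg) + contLen);
  if (pMsg == NULL) return NULL;
  pMsg->msgType = msgType;
  pMsg->contLen = contLen;
  memcpy(pMsg->pCont, pCont, contLen);
  return pMsg;
}

int main(void) {
  const char payload[] = "create-vnode cfg ...";
  ToyMgmtMsg *pMsg = toyBuildMsg(7, payload, (int)sizeof(payload));
  if (pMsg == NULL) return 1;
  printf("type:%d len:%d cont:%s\n", pMsg->msgType, pMsg->contLen, pMsg->pCont);
  free(pMsg);            /* a single free releases header and payload together */
  return 0;
}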
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_READ_H_
#define _TD_VNODE_READ_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
int32_t vnodeInitRead();
void vnodeCleanupRead();
taos_queue vnodeAllocQueryQueue(SVnode *pVnode);
taos_queue vnodeAllocFetchQueue(SVnode *pVnode);
void vnodeFreeQueryQueue(taos_queue pQueue);
void vnodeFreeFetchQueue(taos_queue pQueue);
void vnodeProcessReadMsg(SRpcMsg *pRpcMsg);
int32_t vnodeReputPutToRQueue(SVnode *pVnode, void **qhandle, void *ahandle);
void vnodeStartRead(SVnode *pVnode);
void vnodeStopRead(SVnode *pVnode);
void vnodeWaitReadCompleted(SVnode *pVnode);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_READ_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_READ_MSG_H_
#define _TD_VNODE_READ_MSG_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
typedef struct SReadMsg {
int32_t code;
int32_t contLen;
int8_t qtype;
int8_t msgType;
SVnode *pVnode;
SVnRsp rspRet;
void * rpcHandle;
void * rpcAhandle;
void * qhandle;
char pCont[];
} SReadMsg;
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SReadMsg *pRead);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SReadMsg *pRead);
int32_t vnodeProcessConsumeMsg(SVnode *pVnode, SReadMsg *pRead);
int32_t vnodeProcessTqQueryMsg(SVnode *pVnode, SReadMsg *pRead);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_READ_MSG_H_*/
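
vnodeReadMsg.h splits the read path into per-type handlers (query, fetch, consume, tq query) that all take the vnode plus a SReadMsg. One plausible way a dispatcher could fan out over such handlers, shown only as a hedged sketch with invented message ids and stub handlers (the real routing in vnodeRead.c may differ):

#include <stdint.h>
#include <stdio.h>

enum { TOY_MSG_QUERY = 1, TOY_MSG_FETCH = 2 };   /* invented ids, not TSDB_MSG_TYPE_* */

typedef struct { int32_t vgId; } ToyVnode;
typedef struct { int8_t msgType; } ToyReadMsg;   /* loosely mirrors SReadMsg.msgType */

static int32_t toyProcessQueryMsg(ToyVnode *pVnode, ToyReadMsg *pRead) {
  (void)pRead;
  printf("vgId:%d run query\n", pVnode->vgId);
  return 0;
}

static int32_t toyProcessFetchMsg(ToyVnode *pVnode, ToyReadMsg *pRead) {
  (void)pRead;
  printf("vgId:%d fetch results\n", pVnode->vgId);
  return 0;
}

/* sketch of a read dispatcher: route by message type, reject the rest */
static int32_t toyProcessReadMsg(ToyVnode *pVnode, ToyReadMsg *pRead) {
  switch (pRead->msgType) {
    case TOY_MSG_QUERY: return toyProcessQueryMsg(pVnode, pRead);
    case TOY_MSG_FETCH: return toyProcessFetchMsg(pVnode, pRead);
    default:
      printf("vgId:%d msg %d not processed in read queue\n", pVnode->vgId, pRead->msgType);
      return -1;
  }
}

int main(void) {
  ToyVnode vnode = {.vgId = 2};
  ToyReadMsg q = {.msgType = TOY_MSG_QUERY}, f = {.msgType = TOY_MSG_FETCH}, x = {.msgType = 9};
  toyProcessReadMsg(&vnode, &q);
  toyProcessReadMsg(&vnode, &f);
  toyProcessReadMsg(&vnode, &x);
  return 0;
}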
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_WRITE_H_
#define _TD_VNODE_WRITE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
int32_t vnodeInitWrite();
void vnodeCleanupWrite();
taos_queue vnodeAllocWriteQueue(SVnode *pVnode);
void vnodeFreeWriteQueue(taos_queue pQueue);
void vnodeProcessWriteMsg(SRpcMsg *pRpcMsg);
int32_t vnodeProcessWalMsg(SVnode *pVnode, SWalHead *pHead);
void vnodeStartWrite(SVnode *pVnode);
void vnodeStopWrite(SVnode *pVnode);
void vnodeWaitWriteCompleted(SVnode *pVnode);
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_WRITE_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_VNODE_WRITE_MSG_H_
#define _TD_VNODE_WRITE_MSG_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "vnodeInt.h"
int32_t vnodeProcessSubmitReq(SVnode *pVnode, SSubmitReq *pReq, SSubmitRsp *pRsp);
int32_t vnodeProcessCreateTableReq(SVnode *pVnode, SCreateTableReq *pReq, SCreateTableRsp *pRsp);
int32_t vnodeProcessDropTableReq(SVnode *pVnode, SDropTableReq *pReq, SDropTableRsp *pRsp);
int32_t vnodeProcessAlterTableReq(SVnode *pVnode, SAlterTableReq *pReq, SAlterTableRsp *pRsp);
int32_t vnodeProcessDropStableReq(SVnode *pVnode, SDropStableReq *pReq, SDropStableRsp *pRsp);
int32_t vnodeProcessUpdateTagValReq(SVnode *pVnode, SUpdateTagValReq *pReq, SUpdateTagValRsp *pRsp);
//mq related
int32_t vnodeProcessMqConnectReq(SVnode* pVnode, SMqConnectReq *pReq, SMqConnectRsp *pRsp);
int32_t vnodeProcessMqDisconnectReq(SVnode* pVnode, SMqConnectReq *pReq, SMqConnectRsp *pRsp);
int32_t vnodeProcessMqAckReq(SVnode* pVnode, SMqAckReq *pReq, SMqAckRsp *pRsp);
int32_t vnodeProcessMqResetReq(SVnode* pVnode, SMqResetReq *pReq, SMqResetRsp *pRsp);
//mq related end
#ifdef __cplusplus
}
#endif
#endif /*_TD_VNODE_WRITE_MSG_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "cJSON.h"
#include "vnodeFile.h"
int32_t vnodeReadCfg(int32_t vgId, SVnodeCfg *pCfg) {
int32_t ret = TSDB_CODE_VND_APP_ERROR;
int32_t len = 0;
int maxLen = 1000;
char *content = calloc(1, maxLen + 1);
cJSON *root = NULL;
FILE *fp = NULL;
char file[PATH_MAX + 30] = {0};
sprintf(file, "%s/vnode%d/config.json", tsVnodeDir, vgId);
fp = fopen(file, "r");
if (!fp) {
vError("vgId:%d, failed to open vnode cfg file:%s to read since %s", vgId, file, strerror(errno));
ret = TAOS_SYSTEM_ERROR(errno);
goto PARSE_VCFG_ERROR;
}
len = (int32_t)fread(content, 1, maxLen, fp);
if (len <= 0) {
vError("vgId:%d, failed to read %s since content is null", vgId, file);
goto PARSE_VCFG_ERROR;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
vError("vgId:%d, failed to read %s since invalid json format", vgId, file);
goto PARSE_VCFG_ERROR;
}
cJSON *db = cJSON_GetObjectItem(root, "db");
if (!db || db->type != cJSON_String || db->valuestring == NULL) {
vError("vgId:%d, failed to read %s since db not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
tstrncpy(pCfg->db, db->valuestring, sizeof(pCfg->db));
cJSON *dropped = cJSON_GetObjectItem(root, "dropped");
if (!dropped || dropped->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since dropped not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->dropped = (int32_t)dropped->valueint;
cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
if (!quorum || quorum->type != cJSON_Number) {
vError("vgId: %d, failed to read %s, quorum not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->quorum = (int8_t)quorum->valueint;
cJSON *cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize");
if (!cacheBlockSize || cacheBlockSize->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since cacheBlockSize not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.cacheBlockSize = (int32_t)cacheBlockSize->valueint;
cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks");
if (!totalBlocks || totalBlocks->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since totalBlocks not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.totalBlocks = (int32_t)totalBlocks->valueint;
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since daysPerFile not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.daysPerFile = (int32_t)daysPerFile->valueint;
cJSON *daysToKeep0 = cJSON_GetObjectItem(root, "daysToKeep0");
if (!daysToKeep0 || daysToKeep0->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since daysToKeep0 not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.daysToKeep0 = (int32_t)daysToKeep0->valueint;
cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1");
if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since daysToKeep1 not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.daysToKeep1 = (int32_t)daysToKeep1->valueint;
cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2");
if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since daysToKeep2 not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.daysToKeep2 = (int32_t)daysToKeep2->valueint;
cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock");
if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since minRowsPerFileBlock not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.minRowsPerFileBlock = (int32_t)minRowsPerFileBlock->valueint;
cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock");
if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since maxRowsPerFileBlock not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.maxRowsPerFileBlock = (int32_t)maxRowsPerFileBlock->valueint;
cJSON *precision = cJSON_GetObjectItem(root, "precision");
if (!precision || precision->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since precision not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.precision = (int8_t)precision->valueint;
cJSON *compression = cJSON_GetObjectItem(root, "compression");
if (!compression || compression->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since compression not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.compression = (int8_t)compression->valueint;
cJSON *update = cJSON_GetObjectItem(root, "update");
if (!update || update->type != cJSON_Number) {
vError("vgId: %d, failed to read %s since update not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.update = (int8_t)update->valueint;
cJSON *cacheLastRow = cJSON_GetObjectItem(root, "cacheLastRow");
if (!cacheLastRow || cacheLastRow->type != cJSON_Number) {
vError("vgId: %d, failed to read %s since cacheLastRow not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->tsdb.cacheLastRow = (int8_t)cacheLastRow->valueint;
cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel");
if (!walLevel || walLevel->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since walLevel not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->wal.walLevel = (int8_t)walLevel->valueint;
cJSON *fsyncPeriod = cJSON_GetObjectItem(root, "fsyncPeriod");
if (!fsyncPeriod || fsyncPeriod->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since fsyncPeriod not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->wal.fsyncPeriod = (int32_t)fsyncPeriod->valueint;
cJSON *selfIndex = cJSON_GetObjectItem(root, "selfIndex");
if (!selfIndex || selfIndex->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since selfIndex not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->sync.selfIndex = selfIndex->valueint;
cJSON *replica = cJSON_GetObjectItem(root, "replica");
if (!replica || replica->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since replica not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
pCfg->sync.replica = replica->valueint;
cJSON *nodes = cJSON_GetObjectItem(root, "nodes");
if (!nodes || nodes->type != cJSON_Array) {
vError("vgId:%d, failed to read %s, nodes not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
int size = cJSON_GetArraySize(nodes);
if (size != pCfg->sync.replica) {
vError("vgId:%d, failed to read %s since nodes size not matched", vgId, file);
goto PARSE_VCFG_ERROR;
}
for (int i = 0; i < size; ++i) {
cJSON *nodeInfo = cJSON_GetArrayItem(nodes, i);
if (nodeInfo == NULL) continue;
SNodeInfo *node = &pCfg->sync.nodeInfo[i];
cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "id");
if (!nodeId || nodeId->type != cJSON_Number) {
vError("vgId:%d, failed to read %s since nodeId not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
node->nodeId = nodeId->valueint;
cJSON *nodePort = cJSON_GetObjectItem(nodeInfo, "port");
if (!nodePort || nodePort->type != cJSON_Number) {
vError("vgId:%d, failed to read %s sincenodePort not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
node->nodePort = (uint16_t)nodePort->valueint;
cJSON *nodeFqdn = cJSON_GetObjectItem(nodeInfo, "fqdn");
if (!nodeFqdn || nodeFqdn->type != cJSON_String || nodeFqdn->valuestring == NULL) {
vError("vgId:%d, failed to read %s since nodeFqdn not found", vgId, file);
goto PARSE_VCFG_ERROR;
}
tstrncpy(node->nodeFqdn, nodeFqdn->valuestring, TSDB_FQDN_LEN);
}
ret = TSDB_CODE_SUCCESS;
PARSE_VCFG_ERROR:
if (content != NULL) free(content);
if (root != NULL) cJSON_Delete(root);
if (fp != NULL) fclose(fp);
terrno = 0;
return ret;
}
int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg) {
int32_t code = 0;
char file[PATH_MAX + 30] = {0};
sprintf(file, "%s/vnode%d/config.json", tsVnodeDir, vgId);
FILE *fp = fopen(file, "w");
if (!fp) {
vError("vgId:%d, failed to write %s error:%s", vgId, file, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}
int32_t len = 0;
int32_t maxLen = 1000;
char *content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
// vnode
len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", vgId);
len += snprintf(content + len, maxLen - len, " \"db\": \"%s\",\n", pCfg->db);
len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pCfg->dropped);
len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pCfg->quorum);
// tsdb
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pCfg->tsdb.cacheBlockSize);
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pCfg->tsdb.totalBlocks);
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pCfg->tsdb.daysPerFile);
len += snprintf(content + len, maxLen - len, " \"daysToKeep0\": %d,\n", pCfg->tsdb.daysToKeep0);
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pCfg->tsdb.daysToKeep1);
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pCfg->tsdb.daysToKeep2);
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pCfg->tsdb.minRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pCfg->tsdb.maxRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pCfg->tsdb.precision);
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pCfg->tsdb.compression);
len += snprintf(content + len, maxLen - len, " \"cacheLastRow\": %d,\n", pCfg->tsdb.cacheLastRow);
len += snprintf(content + len, maxLen - len, " \"update\": %d,\n", pCfg->tsdb.update);
// wal
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pCfg->wal.walLevel);
len += snprintf(content + len, maxLen - len, " \"fsyncPeriod\": %d,\n", pCfg->wal.fsyncPeriod);
// sync
len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pCfg->sync.replica);
len += snprintf(content + len, maxLen - len, " \"selfIndex\": %d,\n", pCfg->sync.selfIndex);
len += snprintf(content + len, maxLen - len, " \"nodes\": [{\n");
for (int32_t i = 0; i < pCfg->sync.replica; i++) {
SNodeInfo *node = &pCfg->sync.nodeInfo[i];
len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", node->nodeId);
len += snprintf(content + len, maxLen - len, " \"port\": %u,\n", node->nodePort);
len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\"\n", node->nodeFqdn);
if (i < pCfg->sync.replica - 1) {
len += snprintf(content + len, maxLen - len, " },{\n");
} else {
len += snprintf(content + len, maxLen - len, " }]\n");
}
}
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
taosFsyncFile(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
vInfo("vgId:%d, successed to write %s", vgId, file);
return TSDB_CODE_SUCCESS;
}
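/* For reference, an illustrative config.json as written by vnodeWriteCfg() above;
 * the field names and order follow the snprintf calls, the values are made up:
 *
 * {
 *   "vgId": 2,
 *   "db": "db0",
 *   "dropped": 0,
 *   "quorum": 1,
 *   "cacheBlockSize": 16,
 *   "totalBlocks": 4,
 *   "daysPerFile": 10,
 *   "daysToKeep0": 3650,
 *   "daysToKeep1": 3650,
 *   "daysToKeep2": 3650,
 *   "minRowsPerFileBlock": 100,
 *   "maxRowsPerFileBlock": 4096,
 *   "precision": 0,
 *   "compression": 2,
 *   "cacheLastRow": 0,
 *   "update": 0,
 *   "walLevel": 1,
 *   "fsyncPeriod": 3000,
 *   "replica": 1,
 *   "selfIndex": 0,
 *   "nodes": [{
 *     "id": 1,
 *     "port": 6030,
 *     "fqdn": "localhost"
 *   }]
 * }
 */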
int32_t vnodeReadState(int32_t vgId, SSyncServerState *pState) {
int32_t ret = TSDB_CODE_VND_APP_ERROR;
int32_t len = 0;
int32_t maxLen = 100;
char *content = calloc(1, maxLen + 1);
cJSON *root = NULL;
FILE *fp = NULL;
char file[PATH_MAX + 30] = {0};
sprintf(file, "%s/vnode%d/state.json", tsVnodeDir, vgId);
fp = fopen(file, "r");
if (!fp) {
vError("vgId:%d, failed to open vnode state file:%s to read since %s", vgId, file, strerror(errno));
ret = TAOS_SYSTEM_ERROR(errno);
goto PARSE_TERM_ERROR;
}
len = (int32_t)fread(content, 1, maxLen, fp);
if (len <= 0) {
vError("vgId:%d, failed to read %s since content is null", vgId, file);
goto PARSE_TERM_ERROR;
}
root = cJSON_Parse(content);
if (root == NULL) {
vError("vgId:%d, failed to read %s since invalid json format", vgId, file);
goto PARSE_TERM_ERROR;
}
cJSON *term = cJSON_GetObjectItem(root, "term");
if (!term || term->type != cJSON_String) {
vError("vgId:%d, failed to read %s since term not found", vgId, file);
goto PARSE_TERM_ERROR;
}
pState->term = atoll(term->valuestring);
cJSON *voteFor = cJSON_GetObjectItem(root, "voteFor");
if (!voteFor || voteFor->type != cJSON_String) {
vError("vgId:%d, failed to read %s since voteFor not found", vgId, file);
goto PARSE_TERM_ERROR;
}
pState->voteFor = atoi(voteFor->valuestring);
vInfo("vgId:%d, read %s success, voteFor:%d, term:%" PRIu64, vgId, file, pState->voteFor, pState->term);
PARSE_TERM_ERROR:
if (content != NULL) free(content);
if (root != NULL) cJSON_Delete(root);
if (fp != NULL) fclose(fp);
return ret;
}
int32_t vnodeSaveState(int32_t vgId, SSyncServerState *pState) {
char file[PATH_MAX + 30] = {0};
sprintf(file, "%s/vnode%d/state.json", tsVnodeDir, vgId);
FILE *fp = fopen(file, "w");
if (!fp) {
vError("vgId:%d, failed to write %s since %s", vgId, file, strerror(errno));
return -1;
}
int32_t len = 0;
int32_t maxLen = 100;
char *content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"term\": \"%" PRIu64 "\",\n", pState->term);
len += snprintf(content + len, maxLen - len, " \"voteFor\": \"%d\"\n", pState->voteFor);
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
taosFsyncFile(fileno(fp));
fclose(fp);
free(content);
vInfo("vgId:%d, write %s success, voteFor:%d, term:%" PRIu64, vgId, file, pState->voteFor, pState->term);
return TSDB_CODE_SUCCESS;
}
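/* For reference, an illustrative state.json as written by vnodeSaveState() above;
 * the values are made up. Note that term and voteFor are written as strings and
 * parsed back with atoll/atoi in vnodeReadState:
 *
 * {
 *   "term": "5",
 *   "voteFor": "1"
 * }
 */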
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tstep.h"
#include "vnodeMain.h"
#include "vnodeMgmt.h"
#include "vnodeRead.h"
#include "vnodeWrite.h"
static struct {
SSteps *steps;
SVnodeFp fp;
void (*msgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *);
} tsVint;
void vnodeGetDnodeEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port) {
(*tsVint.fp.GetDnodeEp)(dnodeId, ep, fqdn, port);
}
void vnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg) {
(*tsVint.fp.SendMsgToDnode)(epSet, rpcMsg);
}
void vnodeSendMsgToMnode(struct SRpcMsg *rpcMsg) { (*tsVint.fp.SendMsgToMnode)(rpcMsg); }
void vnodeReportStartup(char *name, char *desc) { (*tsVint.fp.ReportStartup)(name, desc); }
void vnodeProcessMsg(SRpcMsg *pMsg) {
if (tsVint.msgFp[pMsg->msgType]) {
(*tsVint.msgFp[pMsg->msgType])(pMsg);
} else {
assert(0);
}
}
static void vnodeInitMsgFp() {
tsVint.msgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessMgmtMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessMgmtMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessMgmtMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessMgmtMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessMgmtMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessMgmtMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessWriteMsg;
// mq related
tsVint.msgFp[TSDB_MSG_TYPE_MQ_CONNECT] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MQ_DISCONNECT] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MQ_ACK] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MQ_RESET] = vnodeProcessWriteMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessReadMsg;
tsVint.msgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessReadMsg;
// mq related end
tsVint.msgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessReadMsg;
tsVint.msgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessReadMsg;
}
int32_t vnodeInit(SVnodePara para) {
vnodeInitMsgFp();
tsVint.fp = para.fp;
struct SSteps *steps = taosStepInit(8, NULL);
if (steps == NULL) return -1;
taosStepAdd(steps, "vnode-main", vnodeInitMain, vnodeCleanupMain);
taosStepAdd(steps, "vnode-read", vnodeInitRead, vnodeCleanupRead);
taosStepAdd(steps, "vnode-mgmt", vnodeInitMgmt, vnodeCleanupMgmt);
taosStepAdd(steps, "vnode-write", vnodeInitWrite, vnodeCleanupWrite);
tsVint.steps = steps;
return taosStepExec(tsVint.steps);
}
void vnodeCleanup() { taosStepCleanup(tsVint.steps); }
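/* A minimal illustrative sketch (not part of this patch) of how the dnode layer
 * might wire the callbacks consumed above; the SVnodeFp member names are inferred
 * from the tsVint.fp accesses (GetDnodeEp, SendMsgToDnode, SendMsgToMnode,
 * ReportStartup), and all demo* helpers below are hypothetical placeholders. */
#if 0
static void demoGetDnodeEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port) { /* look up dnode endpoint */ }
static void demoSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg) { /* forward via rpc */ }
static void demoSendMsgToMnode(struct SRpcMsg *rpcMsg) { /* forward to mnode */ }
static void demoReportStartup(char *name, char *desc) { /* report startup progress */ }

static int32_t demoStartVnodeModule() {
  SVnodePara para = {0};
  para.fp.GetDnodeEp = demoGetDnodeEp;
  para.fp.SendMsgToDnode = demoSendMsgToDnode;
  para.fp.SendMsgToMnode = demoSendMsgToMnode;
  para.fp.ReportStartup = demoReportStartup;
  return vnodeInit(para);  // builds the init steps and opens vnodes from disk
}
#endif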
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "thash.h"
#include "tthread.h"
#include "vnodeFile.h"
#include "vnodeMain.h"
#include "vnodeMgmt.h"
#include "vnodeRead.h"
#include "vnodeWrite.h"
typedef enum _VN_STATUS {
TAOS_VN_STATUS_INIT = 0,
TAOS_VN_STATUS_READY = 1,
TAOS_VN_STATUS_CLOSING = 2,
TAOS_VN_STATUS_UPDATING = 3
} EVnodeStatus;
char *vnodeStatus[] = {"init", "ready", "closing", "updating"};
typedef struct {
pthread_t *threadId;
int32_t threadIndex;
int32_t failed;
int32_t opened;
int32_t vnodeNum;
int32_t *vnodeList;
} SOpenVnodeThread;
static struct {
SHashObj *hash;
int32_t openVnodes;
int32_t totalVnodes;
} tsVnode;
static bool vnodeSetInitStatus(SVnode *pVnode) {
pthread_mutex_lock(&pVnode->statusMutex);
pVnode->status = TAOS_VN_STATUS_INIT;
pthread_mutex_unlock(&pVnode->statusMutex);
return true;
}
static bool vnodeSetReadyStatus(SVnode *pVnode) {
bool set = false;
pthread_mutex_lock(&pVnode->statusMutex);
if (pVnode->status == TAOS_VN_STATUS_INIT || pVnode->status == TAOS_VN_STATUS_UPDATING) {
pVnode->status = TAOS_VN_STATUS_READY;
set = true;
}
pthread_mutex_unlock(&pVnode->statusMutex);
return set;
}
static bool vnodeSetUpdatingStatus(SVnode *pVnode) {
bool set = false;
pthread_mutex_lock(&pVnode->statusMutex);
if (pVnode->status == TAOS_VN_STATUS_READY) {
pVnode->status = TAOS_VN_STATUS_UPDATING;
set = true;
}
pthread_mutex_unlock(&pVnode->statusMutex);
return set;
}
static bool vnodeSetClosingStatus(SVnode *pVnode) {
bool set = false;
pthread_mutex_lock(&pVnode->statusMutex);
if (pVnode->status == TAOS_VN_STATUS_INIT || pVnode->status == TAOS_VN_STATUS_READY) {
pVnode->status = TAOS_VN_STATUS_CLOSING;
set = true;
}
pthread_mutex_unlock(&pVnode->statusMutex);
return set;
}
static bool vnodeInStatus(SVnode *pVnode, EVnodeStatus status) {
bool in = false;
pthread_mutex_lock(&pVnode->statusMutex);
if (pVnode->status == status) {
in = true;
}
pthread_mutex_unlock(&pVnode->statusMutex);
return in;
}
static void vnodeDestroyVnode(SVnode *pVnode) {
int32_t code = 0;
int32_t vgId = pVnode->vgId;
if (pVnode->pSync != NULL) {
syncStop(pVnode->pSync);
pVnode->pSync = NULL;
}
if (pVnode->pQuery) {
// todo
}
if (pVnode->pMeta) {
// todo
}
if (pVnode->pTsdb) {
// todo
}
if (pVnode->pTQ) {
// todo
}
if (pVnode->pWal) {
walClose(pVnode->pWal);
pVnode->pWal = NULL;
}
if (pVnode->allocator) {
// todo
}
if (pVnode->pWriteQ) {
vnodeFreeWriteQueue(pVnode->pWriteQ);
pVnode->pWriteQ = NULL;
}
if (pVnode->pQueryQ) {
vnodeFreeQueryQueue(pVnode->pQueryQ);
pVnode->pQueryQ = NULL;
}
if (pVnode->pFetchQ) {
vnodeFreeFetchQueue(pVnode->pFetchQ);
pVnode->pFetchQ = NULL;
}
if (pVnode->dropped) {
// todo
}
pthread_mutex_destroy(&pVnode->statusMutex);
free(pVnode);
}
static void vnodeCleanupVnode(SVnode *pVnode) {
vnodeSetClosingStatus(pVnode);
taosHashRemove(tsVnode.hash, &pVnode->vgId, sizeof(int32_t));
vnodeRelease(pVnode);
}
static inline int32_t vnodeLogWrite(struct SSyncLogStore *logStore, SyncIndex index, SSyncBuffer *pBuf) {
SVnode *pVnode = logStore->pData; // vnode status can be checked here
return walWrite(pVnode->pWal, index, pBuf->data, (int32_t)pBuf->len);
}
static inline int32_t vnodeLogCommit(struct SSyncLogStore *logStore, SyncIndex index) {
SVnode *pVnode = logStore->pData; // vnode status can be checked here
return walCommit(pVnode->pWal, index);
}
static inline int32_t vnodeLogPrune(struct SSyncLogStore *logStore, SyncIndex index) {
SVnode *pVnode = logStore->pData; // vnode status can be checked here
return walPrune(pVnode->pWal, index);
}
static inline int32_t vnodeLogRollback(struct SSyncLogStore *logStore, SyncIndex index) {
SVnode *pVnode = logStore->pData; // vnode status can be checked here
return walRollback(pVnode->pWal, index);
}
static inline int32_t vnodeSaveServerState(struct SStateManager *stateMng, SSyncServerState *pState) {
SVnode *pVnode = stateMng->pData;
return vnodeSaveState(pVnode->vgId, pState);
}
static inline int32_t vnodeReadServerState(struct SStateManager *stateMng, SSyncServerState *pState) {
SVnode *pVnode = stateMng->pData;
return vnodeReadState(pVnode->vgId, pState);
}
static inline int32_t vnodeApplyLog(struct SSyncFSM *fsm, SyncIndex index, const SSyncBuffer *buf, void *pData) {
return 0;
}
static inline int32_t vnodeOnClusterChanged(struct SSyncFSM *fsm, const SSyncCluster *cluster, void *pData) { return 0; }
static inline int32_t vnodeGetSnapshot(struct SSyncFSM *fsm, SSyncBuffer **ppBuf, int32_t *objId, bool *isLast) {
return 0;
}
static inline int32_t vnodeApplySnapshot(struct SSyncFSM *fsm, SSyncBuffer *pBuf, int32_t objId, bool isLast) {
return 0;
}
static inline int32_t vnodeOnRestoreDone(struct SSyncFSM *fsm) { return 0; }
static inline void vnodeOnRollback(struct SSyncFSM *fsm, SyncIndex index, const SSyncBuffer *buf) {}
static inline void vnodeOnRoleChanged(struct SSyncFSM *fsm, const SNodesRole *pRole) {}
static int32_t vnodeOpenVnode(int32_t vgId) {
int32_t code = 0;
SVnode *pVnode = calloc(1, sizeof(SVnode));
if (pVnode == NULL) {
vError("vgId:%d, failed to open vnode since not enough memory", vgId);
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
}
pVnode->vgId = vgId;
pVnode->status = TAOS_VN_STATUS_INIT;
pVnode->accessState = TSDB_VN_ALL_ACCCESS;
pVnode->refCount = 1;
pVnode->role = TAOS_SYNC_ROLE_CANDIDATE;
pthread_mutex_init(&pVnode->statusMutex, NULL);
vDebug("vgId:%d, vnode is opened", pVnode->vgId);
taosHashPut(tsVnode.hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnode *));
code = vnodeReadCfg(vgId, &pVnode->cfg);
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, failed to read config file, set cfgVersion to 0", pVnode->vgId);
pVnode->cfg.dropped = 1;
vnodeCleanupVnode(pVnode);
return 0;
}
code = vnodeReadState(vgId, &pVnode->term);
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, failed to read term file since %s", pVnode->vgId, tstrerror(code));
pVnode->cfg.dropped = 1;
vnodeCleanupVnode(pVnode);
return code;
}
pVnode->pWriteQ = vnodeAllocWriteQueue(pVnode);
pVnode->pQueryQ = vnodeAllocQueryQueue(pVnode);
pVnode->pFetchQ = vnodeAllocFetchQueue(pVnode);
if (pVnode->pWriteQ == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL) {
vnodeCleanupVnode(pVnode);
return terrno;
}
char path[PATH_MAX + 20];
snprintf(path, sizeof(path), "%s/vnode%d/wal", tsVnodeDir, vgId);
pVnode->pWal = walOpen(path, &pVnode->cfg.wal);
if (pVnode->pWal == NULL) {
vnodeCleanupVnode(pVnode);
return terrno;
}
// create sync node
SSyncInfo syncInfo = {0};
syncInfo.vgId = vgId;
syncInfo.snapshotIndex = 0; // todo, from tsdb
memcpy(&syncInfo.syncCfg, &pVnode->cfg.sync, sizeof(SSyncCluster));
syncInfo.fsm.pData = pVnode;
syncInfo.fsm.applyLog = vnodeApplyLog;
syncInfo.fsm.onClusterChanged = vnodeOnClusterChanged;
syncInfo.fsm.getSnapshot = vnodeGetSnapshot;
syncInfo.fsm.applySnapshot = vnodeApplySnapshot;
syncInfo.fsm.onRestoreDone = vnodeOnRestoreDone;
syncInfo.fsm.onRollback = vnodeOnRollback;
syncInfo.fsm.onRoleChanged = vnodeOnRoleChanged;
syncInfo.logStore.pData = pVnode;
syncInfo.logStore.logWrite = vnodeLogWrite;
syncInfo.logStore.logCommit = vnodeLogCommit;
syncInfo.logStore.logPrune = vnodeLogPrune;
syncInfo.logStore.logRollback = vnodeLogRollback;
syncInfo.stateManager.pData = pVnode;
syncInfo.stateManager.saveServerState = vnodeSaveServerState;
syncInfo.stateManager.readServerState = vnodeReadServerState;
pVnode->pSync = syncStart(&syncInfo);
if (pVnode->pSync == NULL) {
vnodeCleanupVnode(pVnode);
return terrno;
}
vnodeSetReadyStatus(pVnode);
return TSDB_CODE_SUCCESS;
}
int32_t vnodeCreateVnode(int32_t vgId, SVnodeCfg *pCfg) {
int32_t code = 0;
char path[PATH_MAX + 20] = {0};
snprintf(path, sizeof(path), "%s/vnode%d", tsVnodeDir, vgId);
if (taosMkDir(path) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
vError("vgId:%d, failed to create since %s", vgId, tstrerror(code));
return code;
}
snprintf(path, sizeof(path), "%s/vnode%d/cfg", tsVnodeDir, vgId);
if (taosMkDir(path) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
vError("vgId:%d, failed to create since %s", vgId, tstrerror(code));
return code;
}
snprintf(path, sizeof(path), "%s/vnode%d/wal", tsVnodeDir, vgId);
if (taosMkDir(path) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
vError("vgId:%d, failed to create since %s", vgId, tstrerror(code));
return code;
}
snprintf(path, sizeof(path), "%s/vnode%d/tq", tsVnodeDir, vgId);
if (taosMkDir(path) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
vError("vgId:%d, failed to create since %s", vgId, tstrerror(code));
return code;
}
snprintf(path, sizeof(path), "%s/vnode%d/tsdb", tsVnodeDir, vgId);
if (taosMkDir(path) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
vError("vgId:%d, failed to create since %s", vgId, tstrerror(code));
return code;
}
snprintf(path, sizeof(path), "%s/vnode%d/meta", tsVnodeDir, vgId);
if (taosMkDir(path) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
vError("vgId:%d, failed to create since %s", vgId, tstrerror(code));
return code;
}
code = vnodeWriteCfg(vgId, pCfg);
if (code != 0) {
vError("vgId:%d, failed to save vnode cfg since %s", vgId, tstrerror(code));
return code;
}
return vnodeOpenVnode(vgId);
}
int32_t vnodeAlterVnode(SVnode * pVnode, SVnodeCfg *pCfg) {
int32_t code = 0;
int32_t vgId = pVnode->vgId;
bool walChanged = (memcmp(&pCfg->wal, &pVnode->cfg.wal, sizeof(SWalCfg)) != 0);
bool tsdbChanged = (memcmp(&pCfg->tsdb, &pVnode->cfg.tsdb, sizeof(STsdbCfg)) != 0);
bool metaChanged = (memcmp(&pCfg->meta, &pVnode->cfg.meta, sizeof(SMetaCfg)) != 0);
bool syncChanged = (memcmp(&pCfg->sync, &pVnode->cfg.sync, sizeof(SSyncCluster)) != 0);
if (!walChanged && !tsdbChanged && !metaChanged && !syncChanged) {
vDebug("vgId:%d, nothing changed", vgId);
vnodeRelease(pVnode);
return code;
}
code = vnodeWriteCfg(pVnode->vgId, pCfg);
if (code != 0) {
vError("vgId:%d, failed to write alter msg to file since %s", vgId, tstrerror(code));
vnodeRelease(pVnode);
return code;
}
pVnode->cfg = *pCfg;
if (walChanged) {
code = walAlter(pVnode->pWal, &pVnode->cfg.wal);
if (code != 0) {
vDebug("vgId:%d, failed to alter wal since %s", vgId, tstrerror(code));
vnodeRelease(pVnode);
return code;
}
}
if (tsdbChanged) {
// todo
}
if (metaChanged) {
// todo
}
if (syncChanged) {
syncReconfig(pVnode->pSync, &pVnode->cfg.sync);
}
vnodeRelease(pVnode);
return code;
}
int32_t vnodeDropVnode(SVnode *pVnode) {
if (pVnode->cfg.dropped) {
vInfo("vgId:%d, already set drop flag, ref:%d", pVnode->vgId, pVnode->refCount);
vnodeRelease(pVnode);
return TSDB_CODE_SUCCESS;
}
pVnode->cfg.dropped = 1;
int32_t code = vnodeWriteCfg(pVnode->vgId, &pVnode->cfg);
if (code == 0) {
vInfo("vgId:%d, set drop flag, ref:%d", pVnode->vgId, pVnode->refCount);
vnodeCleanupVnode(pVnode);
} else {
vError("vgId:%d, failed to set drop flag since %s", pVnode->vgId, tstrerror(code));
pVnode->cfg.dropped = 0;
}
vnodeRelease(pVnode);
return code;
}
int32_t vnodeSyncVnode(SVnode *pVnode) {
return TSDB_CODE_SUCCESS;
}
int32_t vnodeCompactVnode(SVnode *pVnode) {
return TSDB_CODE_SUCCESS;
}
static void *vnodeOpenVnodeFunc(void *param) {
SOpenVnodeThread *pThread = param;
vDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum);
setThreadName("vnodeOpenVnode");
for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
int32_t vgId = pThread->vnodeList[v];
char stepDesc[TSDB_STEP_DESC_LEN] = {0};
snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to restore, %d of %d have been opened", vgId,
tsVnode.openVnodes, tsVnode.totalVnodes);
// (*vnodeInst()->fp.ReportStartup)("open-vnodes", stepDesc);
if (vnodeOpenVnode(vgId) < 0) {
vError("vgId:%d, failed to open vnode by thread:%d", vgId, pThread->threadIndex);
pThread->failed++;
} else {
vDebug("vgId:%d, is opened by thread:%d", vgId, pThread->threadIndex);
pThread->opened++;
}
atomic_add_fetch_32(&tsVnode.openVnodes, 1);
}
vDebug("thread:%d, total vnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
pThread->failed);
return NULL;
}
static int32_t vnodeGetVnodeListFromDisk(int32_t vnodeList[], int32_t *numOfVnodes) {
#if 0
DIR *dir = opendir(tsVnodeDir);
if (dir == NULL) return TSDB_CODE_DND_NO_WRITE_ACCESS;
*numOfVnodes = 0;
struct dirent *de = NULL;
while ((de = readdir(dir)) != NULL) {
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
if (de->d_type & DT_DIR) {
if (strncmp("vnode", de->d_name, 5) != 0) continue;
int32_t vnode = atoi(de->d_name + 5);
if (vnode == 0) continue;
(*numOfVnodes)++;
if (*numOfVnodes >= TSDB_MAX_VNODES) {
vError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES);
closedir(dir);
return TSDB_CODE_DND_TOO_MANY_VNODES;
} else {
vnodeList[*numOfVnodes - 1] = vnode;
}
}
}
closedir(dir);
#endif
return TSDB_CODE_SUCCESS;
}
static int32_t vnodeOpenVnodes() {
int32_t vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
int32_t status = vnodeGetVnodeListFromDisk(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
vInfo("failed to get vnode list from disk since code:%d", status);
return status;
}
tsVnode.totalVnodes = numOfVnodes;
int32_t threadNum = tsNumOfCores;
int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread));
for (int32_t t = 0; t < threadNum; ++t) {
threads[t].threadIndex = t;
threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t));
}
for (int32_t v = 0; v < numOfVnodes; ++v) {
int32_t t = v % threadNum;
SOpenVnodeThread *pThread = &threads[t];
pThread->vnodeList[pThread->vnodeNum++] = vnodeList[v];
}
vInfo("start %d threads to open %d vnodes", threadNum, numOfVnodes);
for (int32_t t = 0; t < threadNum; ++t) {
SOpenVnodeThread *pThread = &threads[t];
if (pThread->vnodeNum == 0) continue;
pThread->threadId = taosCreateThread(vnodeOpenVnodeFunc, pThread);
if (pThread->threadId == NULL) {
vError("thread:%d, failed to create thread to open vnode, reason:%s", pThread->threadIndex, strerror(errno));
}
}
int32_t openVnodes = 0;
int32_t failedVnodes = 0;
for (int32_t t = 0; t < threadNum; ++t) {
SOpenVnodeThread *pThread = &threads[t];
taosDestoryThread(pThread->threadId);
pThread->threadId = NULL;
openVnodes += pThread->opened;
failedVnodes += pThread->failed;
free(pThread->vnodeList);
}
free(threads);
vInfo("there are total vnodes:%d, opened:%d", numOfVnodes, openVnodes);
if (failedVnodes != 0) {
vError("there are total vnodes:%d, failed:%d", numOfVnodes, failedVnodes);
return -1;
}
return TSDB_CODE_SUCCESS;
}
static int32_t vnodeGetVnodeList(SVnode *vnodeList[], int32_t *numOfVnodes) {
void *pIter = taosHashIterate(tsVnode.hash, NULL);
while (pIter) {
SVnode **pVnode = pIter;
if (*pVnode) {
(*numOfVnodes)++;
if (*numOfVnodes >= TSDB_MAX_VNODES) {
vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES);
continue;
} else {
vnodeList[*numOfVnodes - 1] = (*pVnode);
}
}
pIter = taosHashIterate(tsVnode.hash, pIter);
}
return TSDB_CODE_SUCCESS;
}
static void vnodeCleanupVnodes() {
SVnode* vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
int32_t code = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (code != TSDB_CODE_SUCCESS) {
vInfo("failed to get dnode list since code %d", code);
return;
}
for (int32_t i = 0; i < numOfVnodes; ++i) {
vnodeCleanupVnode(vnodeList[i]);
}
vInfo("total vnodes:%d are all closed", numOfVnodes);
}
static void vnodeIncRef(void *ptNode) {
assert(ptNode != NULL);
SVnode **ppVnode = (SVnode **)ptNode;
assert(ppVnode);
assert(*ppVnode);
SVnode *pVnode = *ppVnode;
atomic_add_fetch_32(&pVnode->refCount, 1);
vTrace("vgId:%d, get vnode, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
}
SVnode *vnodeAcquireInAllState(int32_t vgId) {
SVnode *pVnode = NULL;
// taosHashGetClone(tsVnode.hash, &vgId, sizeof(int32_t), vnodeIncRef, (void*)&pVnode);
if (pVnode == NULL) {
vDebug("vgId:%d, can't accquire since not exist", vgId);
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
return NULL;
}
return pVnode;
}
SVnode *vnodeAcquire(int32_t vgId) {
SVnode *pVnode = vnodeAcquireInAllState(vgId);
if (pVnode == NULL) return NULL;
if (vnodeInStatus(pVnode, TAOS_VN_STATUS_READY)) {
return pVnode;
} else {
vDebug("vgId:%d, can't accquire since not in ready status", vgId);
vnodeRelease(pVnode);
terrno = TSDB_CODE_VND_INVALID_TSDB_STATE;
return NULL;
}
}
void vnodeRelease(SVnode *pVnode) {
if (pVnode == NULL) return;
int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1);
int32_t vgId = pVnode->vgId;
vTrace("vgId:%d, release vnode, refCount:%d pVnode:%p", vgId, refCount, pVnode);
assert(refCount >= 0);
if (refCount <= 0) {
vDebug("vgId:%d, vnode will be destroyed, refCount:%d pVnode:%p", vgId, refCount, pVnode);
vnodeDestroyVnode(pVnode);
int32_t count = taosHashGetSize(tsVnode.hash);
vDebug("vgId:%d, vnode is destroyed, vnodes:%d", vgId, count);
}
}
int32_t vnodeInitMain() {
tsVnode.hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (tsVnode.hash == NULL) {
vError("failed to init vnode mgmt");
return -1;
}
vInfo("vnode main is initialized");
return vnodeOpenVnodes();
}
void vnodeCleanupMain() {
vnodeCleanupVnodes();
taosHashCleanup(tsVnode.hash);
tsVnode.hash = NULL;
}
static void vnodeBuildVloadMsg(SVnode *pVnode, SStatusMsg *pStatus) {
int64_t totalStorage = 0;
int64_t compStorage = 0;
int64_t pointsWritten = 0;
if (pStatus->openVnodes >= TSDB_MAX_VNODES) return;
// if (pVnode->tsdb) {
// tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage);
// }
SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++];
pLoad->vgId = htonl(pVnode->vgId);
pLoad->totalStorage = htobe64(totalStorage);
pLoad->compStorage = htobe64(compStorage);
pLoad->pointsWritten = htobe64(pointsWritten);
pLoad->status = pVnode->status;
pLoad->role = pVnode->role;
}
void vnodeGetStatus(SStatusMsg *pStatus) {
void *pIter = taosHashIterate(tsVnode.hash, NULL);
while (pIter) {
SVnode **pVnode = pIter;
if (*pVnode) {
vnodeBuildVloadMsg(*pVnode, pStatus);
}
pIter = taosHashIterate(tsVnode.hash, pIter);
}
}
void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
pAccess[i].vgId = htonl(pAccess[i].vgId);
SVnode *pVnode = vnodeAcquire(pAccess[i].vgId);
if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
vDebug("vgId:%d, access state is set to %d", pAccess[i].vgId, pVnode->accessState);
}
vnodeRelease(pVnode);
}
}
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "vnodeMain.h"
#include "vnodeMgmt.h"
static struct {
SWorkerPool createPool;
taos_queue createQueue;
SWorkerPool workerPool;
taos_queue workerQueue;
int32_t (*msgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *);
} tsVmgmt = {0};
static int32_t vnodeParseCreateVnodeReq(SRpcMsg *rpcMsg, int32_t *vgId, SVnodeCfg *pCfg) {
SCreateVnodeMsg *pCreate = rpcMsg->pCont;
*vgId = htonl(pCreate->vgId);
pCfg->dropped = 0;
pCfg->quorum = pCreate->quorum;
tstrncpy(pCfg->db, pCreate->db, sizeof(pCfg->db));
pCfg->tsdb.cacheBlockSize = htonl(pCreate->cacheBlockSize);
pCfg->tsdb.totalBlocks = htonl(pCreate->totalBlocks);
pCfg->tsdb.daysPerFile = htonl(pCreate->daysPerFile);
pCfg->tsdb.daysToKeep1 = htonl(pCreate->daysToKeep1);
pCfg->tsdb.daysToKeep2 = htonl(pCreate->daysToKeep2);
pCfg->tsdb.daysToKeep0 = htonl(pCreate->daysToKeep0);
pCfg->tsdb.minRowsPerFileBlock = htonl(pCreate->minRowsPerFileBlock);
pCfg->tsdb.maxRowsPerFileBlock = htonl(pCreate->maxRowsPerFileBlock);
pCfg->tsdb.precision = pCreate->precision;
pCfg->tsdb.compression = pCreate->compression;
pCfg->tsdb.cacheLastRow = pCreate->cacheLastRow;
pCfg->tsdb.update = pCreate->update;
pCfg->wal.fsyncPeriod = htonl(pCreate->fsyncPeriod);
pCfg->wal.walLevel = pCreate->walLevel;
pCfg->sync.replica = pCreate->replica;
pCfg->sync.selfIndex = pCreate->selfIndex;
for (int32_t j = 0; j < pCreate->replica; ++j) {
pCfg->sync.nodeInfo[j].nodePort = htons(pCreate->nodes[j].port);
tstrncpy(pCfg->sync.nodeInfo[j].nodeFqdn, pCreate->nodes[j].fqdn, TSDB_FQDN_LEN);
}
return 0;
}
static int32_t vnodeProcessCreateVnodeReq(SRpcMsg *rpcMsg) {
SVnodeCfg vnodeCfg = {0};
int32_t vgId = 0;
int32_t code = vnodeParseCreateVnodeReq(rpcMsg, &vgId, &vnodeCfg);
if (code != 0) {
vError("failed to parse create vnode msg since %s", tstrerror(code));
}
vDebug("vgId:%d, create vnode req is received", vgId);
SVnode *pVnode = vnodeAcquireInAllState(vgId);
if (pVnode != NULL) {
vDebug("vgId:%d, already exist, return success", vgId);
vnodeRelease(pVnode);
return code;
}
code = vnodeCreateVnode(vgId, &vnodeCfg);
if (code != 0) {
vError("vgId:%d, failed to create vnode since %s", vgId, tstrerror(code));
}
return code;
}
static int32_t vnodeProcessAlterVnodeReq(SRpcMsg *rpcMsg) {
SVnodeCfg vnodeCfg = {0};
int32_t vgId = 0;
int32_t code = vnodeParseCreateVnodeReq(rpcMsg, &vgId, &vnodeCfg);
if (code != 0) {
vError("failed to parse create vnode msg since %s", tstrerror(code));
}
vDebug("vgId:%d, alter vnode req is received", vgId);
SVnode *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
code = terrno;
vDebug("vgId:%d, failed to alter vnode since %s", vgId, tstrerror(code));
return code;
}
code = vnodeAlterVnode(pVnode, &vnodeCfg);
if (code != 0) {
vError("vgId:%d, failed to alter vnode since %s", vgId, tstrerror(code));
}
vnodeRelease(pVnode);
return code;
}
static SDropVnodeMsg *vnodeParseDropVnodeReq(SRpcMsg *rpcMsg) {
SDropVnodeMsg *pDrop = rpcMsg->pCont;
pDrop->vgId = htonl(pDrop->vgId);
return pDrop;
}
static int32_t vnodeProcessSyncVnodeReq(SRpcMsg *rpcMsg) {
SSyncVnodeMsg *pSync = (SSyncVnodeMsg *)vnodeParseDropVnodeReq(rpcMsg);
int32_t code = 0;
int32_t vgId = pSync->vgId;
vDebug("vgId:%d, sync vnode req is received", vgId);
SVnode *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
code = terrno;
vDebug("vgId:%d, failed to sync since %s", vgId, tstrerror(code));
return code;
}
code = vnodeSyncVnode(pVnode);
if (code != 0) {
vError("vgId:%d, failed to compact vnode since %s", vgId, tstrerror(code));
}
vnodeRelease(pVnode);
return code;
}
static int32_t vnodeProcessCompactVnodeReq(SRpcMsg *rpcMsg) {
SCompactVnodeMsg *pCompact = (SCompactVnodeMsg *)vnodeParseDropVnodeReq(rpcMsg);
int32_t code = 0;
int32_t vgId = pCompact->vgId;
vDebug("vgId:%d, compact vnode req is received", vgId);
SVnode *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
code = terrno;
vDebug("vgId:%d, failed to compact since %s", vgId, tstrerror(code));
return code;
}
code = vnodeCompactVnode(pVnode);
if (code != 0) {
vError("vgId:%d, failed to compact vnode since %s", vgId, tstrerror(code));
}
vnodeRelease(pVnode);
return code;
}
static int32_t vnodeProcessDropVnodeReq(SRpcMsg *rpcMsg) {
SDropVnodeMsg *pDrop = vnodeParseDropVnodeReq(rpcMsg);
int32_t code = 0;
int32_t vgId = pDrop->vgId;
vDebug("vgId:%d, drop vnode req is received", vgId);
SVnode *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
code = terrno;
vDebug("vgId:%d, failed to drop since %s", vgId, tstrerror(code));
return code;
}
code = vnodeDropVnode(pVnode);
if (code != 0) {
vError("vgId:%d, failed to drop vnode since %s", vgId, tstrerror(code));
}
vnodeRelease(pVnode);
return code;
}
static int32_t vnodeProcessAlterStreamReq(SRpcMsg *pMsg) {
vError("alter stream msg not processed");
return TSDB_CODE_VND_MSG_NOT_PROCESSED;
}
static int32_t vnodeProcessMgmtStart(void *unused, SVnMgmtMsg *pMgmt, int32_t qtype) {
SRpcMsg *pMsg = &pMgmt->rpcMsg;
int32_t msgType = pMsg->msgType;
if (tsVmgmt.msgFp[msgType]) {
vTrace("msg:%p, ahandle:%p type:%s will be processed", pMgmt, pMsg->ahandle, taosMsg[msgType]);
return (*tsVmgmt.msgFp[msgType])(pMsg);
} else {
vError("msg:%p, ahandle:%p type:%s not processed since no handle", pMgmt, pMsg->ahandle, taosMsg[msgType]);
return TSDB_CODE_DND_MSG_NOT_PROCESSED;
}
}
static void vnodeProcessMgmtEnd(void *unused, SVnMgmtMsg *pMgmt, int32_t qtype, int32_t code) {
SRpcMsg *pMsg = &pMgmt->rpcMsg;
vTrace("msg:%p, is processed, result:%s", pMgmt, tstrerror(code));
SRpcMsg rsp = {.code = code, .handle = pMsg->handle};
rpcSendResponse(&rsp);
taosFreeQitem(pMgmt);
}
static void vnodeInitMgmtReqFp() {
tsVmgmt.msgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessCreateVnodeReq;
tsVmgmt.msgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessAlterVnodeReq;
tsVmgmt.msgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessSyncVnodeReq;
tsVmgmt.msgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessCompactVnodeReq;
tsVmgmt.msgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessDropVnodeReq;
tsVmgmt.msgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessAlterStreamReq;
}
static int32_t vnodeWriteToMgmtQueue(SRpcMsg *pMsg) {
int32_t size = sizeof(SVnMgmtMsg) + pMsg->contLen;
SVnMgmtMsg *pMgmt = taosAllocateQitem(size);
if (pMgmt == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY;
pMgmt->rpcMsg = *pMsg;
pMgmt->rpcMsg.pCont = pMgmt->pCont;
memcpy(pMgmt->pCont, pMsg->pCont, pMsg->contLen);
if (pMsg->msgType == TSDB_MSG_TYPE_MD_CREATE_VNODE) {
return taosWriteQitem(tsVmgmt.createQueue, TAOS_QTYPE_RPC, pMgmt);
} else {
return taosWriteQitem(tsVmgmt.workerQueue, TAOS_QTYPE_RPC, pMgmt);
}
}
void vnodeProcessMgmtMsg(SRpcMsg *pMsg) {
int32_t code = vnodeWriteToMgmtQueue(pMsg);
if (code != TSDB_CODE_SUCCESS) {
vError("msg, ahandle:%p type:%s not processed since %s", pMsg->ahandle, taosMsg[pMsg->msgType], tstrerror(code));
SRpcMsg rsp = {.handle = pMsg->handle, .code = code};
rpcSendResponse(&rsp);
}
rpcFreeCont(pMsg->pCont);
}
int32_t vnodeInitMgmt() {
vnodeInitMgmtReqFp();
SWorkerPool *pPool = &tsVmgmt.createPool;
pPool->name = "vnode-mgmt-create";
pPool->startFp = (ProcessStartFp)vnodeProcessMgmtStart;
pPool->endFp = (ProcessEndFp)vnodeProcessMgmtEnd;
pPool->min = 1;
pPool->max = 1;
if (tWorkerInit(pPool) != 0) {
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
tsVmgmt.createQueue = tWorkerAllocQueue(pPool, NULL);
pPool = &tsVmgmt.workerPool;
pPool->name = "vnode-mgmt-worker";
pPool->startFp = (ProcessStartFp)vnodeProcessMgmtStart;
pPool->endFp = (ProcessEndFp)vnodeProcessMgmtEnd;
pPool->min = 1;
pPool->max = 1;
if (tWorkerInit(pPool) != 0) {
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
tsVmgmt.workerQueue = tWorkerAllocQueue(pPool, NULL);
vInfo("vmgmt is initialized");
return TSDB_CODE_SUCCESS;
}
void vnodeCleanupMgmt() {
tWorkerFreeQueue(&tsVmgmt.createPool, tsVmgmt.createQueue);
tWorkerCleanup(&tsVmgmt.createPool);
tsVmgmt.createQueue = NULL;
tWorkerFreeQueue(&tsVmgmt.workerPool, tsVmgmt.workerQueue);
tWorkerCleanup(&tsVmgmt.workerPool);
tsVmgmt.workerQueue = NULL;
vInfo("vmgmt is closed");
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "vnodeMain.h"
#include "vnodeRead.h"
#include "vnodeReadMsg.h"
static struct {
SWorkerPool query;
SWorkerPool fetch;
int32_t (*msgFp[TSDB_MSG_TYPE_MAX])(SVnode *, struct SReadMsg *);
} tsVread = {0};
void vnodeStartRead(SVnode *pVnode) {}
void vnodeStopRead(SVnode *pVnode) {}
void vnodeWaitReadCompleted(SVnode *pVnode) {
while (pVnode->queuedRMsg > 0) {
vTrace("vgId:%d, queued rmsg num:%d", pVnode->vgId, pVnode->queuedRMsg);
taosMsleep(10);
}
}
static int32_t vnodeWriteToRQueue(SVnode *pVnode, void *pCont, int32_t contLen, int8_t qtype, SRpcMsg *pRpcMsg) {
if (pVnode->dropped) {
return TSDB_CODE_APP_NOT_READY;
}
#if 0
if (!((pVnode->role == TAOS_SYNC_ROLE_MASTER) || (tsEnableSlaveQuery && pVnode->role == TAOS_SYNC_ROLE_SLAVE))) {
return TSDB_CODE_APP_NOT_READY;
}
#endif
int32_t size = sizeof(SReadMsg) + contLen;
SReadMsg *pRead = taosAllocateQitem(size);
if (pRead == NULL) {
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
if (pRpcMsg != NULL) {
pRead->rpcHandle = pRpcMsg->handle;
pRead->rpcAhandle = pRpcMsg->ahandle;
pRead->msgType = pRpcMsg->msgType;
pRead->code = pRpcMsg->code;
}
if (contLen != 0) {
pRead->contLen = contLen;
memcpy(pRead->pCont, pCont, contLen);
} else {
pRead->qhandle = pCont;
}
pRead->qtype = qtype;
atomic_add_fetch_32(&pVnode->refCount, 1);
atomic_add_fetch_32(&pVnode->queuedRMsg, 1);
if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRead->msgType == TSDB_MSG_TYPE_FETCH) {
return taosWriteQitem(pVnode->pFetchQ, qtype, pRead);
} else {
return taosWriteQitem(pVnode->pQueryQ, qtype, pRead);
}
}
static void vnodeFreeFromRQueue(SVnode *pVnode, SReadMsg *pRead) {
atomic_sub_fetch_32(&pVnode->queuedRMsg, 1);
taosFreeQitem(pRead);
vnodeRelease(pVnode);
}
int32_t vnodeReputPutToRQueue(SVnode *pVnode, void **qhandle, void *ahandle) {
SRpcMsg rpcMsg = {0};
rpcMsg.msgType = TSDB_MSG_TYPE_QUERY;
rpcMsg.ahandle = ahandle;
int32_t code = vnodeWriteToRQueue(pVnode, qhandle, 0, TAOS_QTYPE_QUERY, &rpcMsg);
if (code == TSDB_CODE_SUCCESS) {
vTrace("QInfo:%p add to vread queue for exec query", *qhandle);
}
return code;
}
void vnodeProcessReadMsg(SRpcMsg *pMsg) {
int32_t queuedMsgNum = 0;
int32_t leftLen = pMsg->contLen;
int32_t code = TSDB_CODE_VND_INVALID_VGROUP_ID;
char * pCont = pMsg->pCont;
while (leftLen > 0) {
SMsgHead *pHead = (SMsgHead *)pCont;
pHead->vgId = htonl(pHead->vgId);
pHead->contLen = htonl(pHead->contLen);
assert(pHead->contLen > 0);
SVnode *pVnode = vnodeAcquire(pHead->vgId);
if (pVnode != NULL) {
code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
if (code == TSDB_CODE_SUCCESS) queuedMsgNum++;
vnodeRelease(pVnode);
}
leftLen -= pHead->contLen;
pCont += pHead->contLen;
}
if (queuedMsgNum == 0) {
SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = code};
rpcSendResponse(&rpcRsp);
}
rpcFreeCont(pMsg->pCont);
}
static void vnodeInitReadMsgFp() {
tsVread.msgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg;
tsVread.msgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessFetchMsg;
tsVread.msgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessTqQueryMsg;
tsVread.msgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessConsumeMsg;
}
static int32_t vnodeProcessReadStart(SVnode *pVnode, SReadMsg *pRead, int32_t qtype) {
int32_t msgType = pRead->msgType;
if (tsVread.msgFp[msgType] == NULL) {
vDebug("vgId:%d, msgType:%s not processed, no handle", pVnode->vgId, taosMsg[msgType]);
return TSDB_CODE_VND_MSG_NOT_PROCESSED;
} else {
vTrace("msg:%p, app:%p type:%s will be processed", pRead, pRead->rpcAhandle, taosMsg[msgType]);
}
return (*tsVread.msgFp[msgType])(pVnode, pRead);
}
static void vnodeSendReadRsp(SReadMsg *pRead, int32_t code) {
SRpcMsg rpcRsp = {
.handle = pRead->rpcHandle,
.pCont = pRead->rspRet.rsp,
.contLen = pRead->rspRet.len,
.code = code,
};
rpcSendResponse(&rpcRsp);
}
static void vnodeProcessReadEnd(SVnode *pVnode, SReadMsg *pRead, int32_t qtype, int32_t code) {
if (qtype == TAOS_QTYPE_RPC && code != TSDB_CODE_QRY_NOT_READY) {
vnodeSendReadRsp(pRead, code);
} else {
if (code == TSDB_CODE_QRY_HAS_RSP) {
vnodeSendReadRsp(pRead, pRead->code);
} else { // code == TSDB_CODE_QRY_NOT_READY, do not return msg to client
assert(pRead->rpcHandle == NULL || (pRead->rpcHandle != NULL && pRead->msgType == 5));
}
}
vnodeFreeFromRQueue(pVnode, pRead);
}
int32_t vnodeInitRead() {
vnodeInitReadMsgFp();
int32_t maxFetchThreads = 4;
float threadsForQuery = MAX(tsNumOfCores * tsRatioOfQueryCores, 1);
SWorkerPool *pPool = &tsVread.query;
pPool->name = "vquery";
pPool->startFp = (ProcessStartFp)vnodeProcessReadStart;
pPool->endFp = (ProcessEndFp)vnodeProcessReadEnd;
pPool->min = (int32_t)threadsForQuery;
pPool->max = pPool->min;
if (tWorkerInit(pPool) != 0) return -1;
pPool = &tsVread.fetch;
pPool->name = "vfetch";
pPool->startFp = (ProcessStartFp)vnodeProcessReadStart;
pPool->endFp = (ProcessEndFp)vnodeProcessReadEnd;
pPool->min = MIN(maxFetchThreads, tsNumOfCores);
pPool->max = pPool->min;
if (tWorkerInit(pPool) != 0) return -1;
vInfo("vread is initialized, max worker %d", pPool->max);
return 0;
}
void vnodeCleanupRead() {
tWorkerCleanup(&tsVread.fetch);
tWorkerCleanup(&tsVread.query);
vInfo("vread is closed");
}
taos_queue vnodeAllocQueryQueue(SVnode *pVnode) { return tWorkerAllocQueue(&tsVread.query, pVnode); }
taos_queue vnodeAllocFetchQueue(SVnode *pVnode) { return tWorkerAllocQueue(&tsVread.fetch, pVnode); }
void vnodeFreeQueryQueue(taos_queue pQueue) { tWorkerFreeQueue(&tsVread.query, pQueue); }
void vnodeFreeFetchQueue(taos_queue pQueue) { tWorkerFreeQueue(&tsVread.fetch, pQueue); }
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "vnodeMain.h"
#include "vnodeRead.h"
#include "vnodeReadMsg.h"
#if 0
// notify connection(handle) that current qhandle is created, if current connection from
// client is broken, the query needs to be killed immediately.
static int32_t vnodeNotifyCurrentQhandle(void *handle, uint64_t qId, void *qhandle, int32_t vgId) {
SRetrieveTableMsg *pMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
pMsg->qId = htobe64(qId);
pMsg->header.vgId = htonl(vgId);
pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));
vTrace("QInfo:0x%" PRIx64 "-%p register qhandle to connect:%p", qId, qhandle, handle);
return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg));
}
/**
* @param pRet response message object
* @param pVnode the vnode object
* @param handle qhandle for executing query
* @param freeHandle free qhandle or not
* @param ahandle sqlObj address at client side
* @return
*/
static int32_t vnodeDumpQueryResult(SVnRsp *pRet, void *pVnode, uint64_t qId, void **handle, bool *freeHandle,
void *ahandle) {
bool continueExec = false;
int32_t code = TSDB_CODE_SUCCESS;
if ((code = qDumpRetrieveResult(*handle, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len, &continueExec)) ==
TSDB_CODE_SUCCESS) {
if (continueExec) {
*freeHandle = false;
code = vnodeReputPutToRQueue(pVnode, handle, ahandle);
if (code != TSDB_CODE_SUCCESS) {
*freeHandle = true;
return code;
} else {
pRet->qhandle = *handle;
}
} else {
*freeHandle = true;
vTrace("QInfo:0x%" PRIx64 "-%p exec completed, free handle:%d", qId, *handle, *freeHandle);
}
} else {
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRsp, 0, sizeof(SRetrieveTableRsp));
pRsp->completed = true;
pRet->rsp = pRsp;
pRet->len = sizeof(SRetrieveTableRsp);
*freeHandle = true;
}
return code;
}
static void vnodeBuildNoResultQueryRsp(SVnRsp *pRet) {
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
SRetrieveTableRsp *pRsp = pRet->rsp;
pRsp->completed = true;
}
#endif
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SReadMsg *pRead) {
#if 0
void * pCont = pRead->pCont;
int32_t contLen = pRead->contLen;
SVnRsp *pRet = &pRead->rspRet;
SQueryTableMsg *pQueryTableMsg = (SQueryTableMsg *)pCont;
memset(pRet, 0, sizeof(SVnRsp));
// qHandle needs to be freed correctly
if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
vError("error rpc msg in query, %s", tstrerror(pRead->code));
}
int32_t code = TSDB_CODE_SUCCESS;
void ** handle = NULL;
if (contLen != 0) {
qinfo_t pQInfo = NULL;
uint64_t qId = genQueryId();
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, qId);
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
pRsp->code = code;
pRsp->qId = 0;
pRet->len = sizeof(SQueryTableRsp);
pRet->rsp = pRsp;
int32_t vgId = pVnode->vgId;
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = terrno;
terrno = 0;
vError("vgId:%d, QInfo:0x%" PRIx64 "-%p register qhandle failed, return to app, code:%s,", pVnode->vgId, qId,
(void *)pQInfo, tstrerror(pRsp->code));
qDestroyQueryInfo(pQInfo); // destroy it directly
return pRsp->code;
} else {
assert(*handle == pQInfo);
pRsp->qId = htobe64(qId);
}
if (handle != NULL &&
vnodeNotifyCurrentQhandle(pRead->rpcHandle, qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:0x%" PRIx64 "-%p, query discarded since link is broken, %p", pVnode->vgId, qId, *handle,
pRead->rpcHandle);
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true);
return pRsp->code;
}
} else {
assert(pQInfo == NULL);
}
if (handle != NULL) {
vTrace("vgId:%d, QInfo:0x%" PRIx64 "-%p, query msg disposed, create qhandle and returns to app", vgId, qId,
*handle);
code = vnodeReputPutToRQueue(pVnode, handle, pRead->rpcHandle);
if (code != TSDB_CODE_SUCCESS) {
pRsp->code = code;
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true);
return pRsp->code;
}
}
int32_t remain = atomic_add_fetch_32(&pVnode->numOfQHandle, 1);
vTrace("vgId:%d, new qhandle created, total qhandle:%d", pVnode->vgId, remain);
} else {
assert(pCont != NULL);
void ** qhandle = (void **)pRead->qhandle;
uint64_t qId = 0;
vTrace("vgId:%d, QInfo:%p, continues to exec query", pVnode->vgId, *qhandle);
// In the retrieve blocking model, only 50% CPU will be used in query processing
if (tsRetrieveBlockingModel) {
qTableQuery(*qhandle, &qId); // do execute query
qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false);
} else {
bool freehandle = false;
bool buildRes = qTableQuery(*qhandle, &qId); // do execute query
// build query rsp, the retrieve request has reached here already
if (buildRes) {
// update the connection info according to the retrieve connection
pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle);
assert(pRead->rpcHandle != NULL);
vTrace("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle,
pRead->rpcHandle);
// set the real rsp error code
pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qId, qhandle, &freehandle, pRead->rpcHandle);
// NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client
code = TSDB_CODE_QRY_HAS_RSP;
} else {
// void *h1 = qGetResultRetrieveMsg(*qhandle);
/* remove this assert, one possible case that will cause h1 not NULL: query thread unlock pQInfo->lock, and then
* FETCH thread execute twice before query thread reach here */
// assert(h1 == NULL);
freehandle = qQueryCompleted(*qhandle);
}
// NOTE: release the qhandle when no result needs to be built here or the query has completed;
// if freeing is required the qhandle is destroyed, otherwise only the reference is dropped
if (freehandle || (!buildRes)) {
if (freehandle) {
int32_t remain = atomic_sub_fetch_32(&pVnode->numOfQHandle, 1);
vTrace("vgId:%d, QInfo:%p, start to free qhandle, remain qhandle:%d", pVnode->vgId, *qhandle, remain);
}
qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle);
}
}
}
return code;
#endif
return 0;
}
//mq related
int32_t vnodeProcessConsumeMsg(SVnode *pVnode, SReadMsg *pRead) {
//parse message and optionally move offset
void* pMsg = pRead->pCont;
TmqConsumeReq *pConsumeMsg = (TmqConsumeReq*) pMsg;
TmqMsgHead msgHead = pConsumeMsg->head;
//extract head
STQ *pTq = pVnode->pTQ;
/*tqBufferHandle *pHandle = tqGetHandle(pTq, msgHead.clientId);*/
//return msg if offset not moved
/*if(pConsumeMsg->commitOffset == pHandle->consumeOffset) {*/
//return msg
/*return 0;*/
/*}*/
//or move offset
/*tqMoveOffsetToNext(pHandle);*/
//fetch or register context
/*tqFetchMsg(pHandle, pRead);*/
//decide the read mode: tail read or catch-up read
/*int64_t lastVer = walLastVer(pVnode->wal);*/
//launch new query
return 0;
}
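/* A minimal sketch of how the commented-out steps above might fit together once STQ lands,
 * using only the helpers and fields already named in those comments (tqGetHandle,
 * tqMoveOffsetToNext, tqFetchMsg, walLastVer); the exact signatures are assumptions. */
#if 0
static int32_t vnodeProcessConsumeMsgDraft(SVnode *pVnode, SReadMsg *pRead) {
  TmqConsumeReq *pReq = (TmqConsumeReq *)pRead->pCont;
  STQ *pTq = pVnode->pTQ;
  // locate the per-consumer buffer by client id
  tqBufferHandle *pHandle = tqGetHandle(pTq, pReq->head.clientId);
  if (pHandle == NULL) return -1;
  // if the committed offset has not advanced, resend the cached message as-is
  if (pReq->commitOffset == pHandle->consumeOffset) {
    return 0;
  }
  // otherwise advance the consume offset and fetch (or register for) the next message
  tqMoveOffsetToNext(pHandle);
  tqFetchMsg(pHandle, pRead);
  // tail read vs. catch-up read can be decided against the last WAL version
  // int64_t lastVer = walLastVer(pVnode->wal);
  return 0;
}
#endif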
int32_t vnodeProcessTqQueryMsg(SVnode *pVnode, SReadMsg *pRead) {
//get operator tree from tq data structure
//execute operator tree
//put data into ringbuffer
//unref memory
return 0;
}
//mq related end
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SReadMsg *pRead) {
#if 0
void * pCont = pRead->pCont;
SVnRsp *pRet = &pRead->rspRet;
SRetrieveTableMsg *pRetrieve = pCont;
pRetrieve->free = htons(pRetrieve->free);
pRetrieve->qId = htobe64(pRetrieve->qId);
vTrace("vgId:%d, qId:0x%" PRIx64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qId,
pRetrieve->free, pRead->rpcHandle);
memset(pRet, 0, sizeof(SVnRsp));
terrno = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
void ** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qId);
if (handle == NULL) {
code = terrno;
terrno = TSDB_CODE_SUCCESS;
} else if (!checkQIdEqual(*handle, pRetrieve->qId)) {
code = TSDB_CODE_QRY_INVALID_QHANDLE;
}
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, invalid qId in retrieving result, code:%s, QInfo:%" PRIu64, pVnode->vgId, tstrerror(code),
pRetrieve->qId);
vnodeBuildNoResultQueryRsp(pRet);
return code;
}
// kill current query and free corresponding resources.
if (pRetrieve->free == 1) {
int32_t remain = atomic_sub_fetch_32(&pVnode->numOfQHandle, 1);
vWarn("vgId:%d, QInfo:%" PRIx64 "-%p, retrieve msg received to kill query and free qhandle, remain qhandle:%d",
pVnode->vgId, pRetrieve->qId, *handle, remain);
qKillQuery(*handle);
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true);
vnodeBuildNoResultQueryRsp(pRet);
code = TSDB_CODE_TSC_QUERY_CANCELLED;
return code;
}
// bind the qhandle to the connection so the query can be terminated immediately if the link is broken
if (vnodeNotifyCurrentQhandle(pRead->rpcHandle, pRetrieve->qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
int32_t remain = atomic_sub_fetch_32(&pVnode->numOfQHandle, 1);
vError("vgId:%d, QInfo:%" PRIu64 "-%p, retrieve discarded since link is broken, conn:%p, remain qhandle:%d",
pVnode->vgId, pRetrieve->qId, *handle, pRead->rpcHandle, remain);
code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
qKillQuery(*handle);
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true);
return code;
}
bool freeHandle = true;
bool buildRes = false;
code = qRetrieveQueryResultInfo(*handle, &buildRes, pRead->rpcHandle);
if (code != TSDB_CODE_SUCCESS) {
// TODO handle malloc failure
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
freeHandle = true;
} else { // result is not ready, return immediately
// Only affects the non-blocking model
if (!tsRetrieveBlockingModel) {
if (!buildRes) {
assert(pRead->rpcHandle != NULL);
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, false);
return TSDB_CODE_QRY_NOT_READY;
}
}
// ahandle is the sqlObj pointer
code = vnodeDumpQueryResult(pRet, pVnode, pRetrieve->qId, handle, &freeHandle, pRead->rpcHandle);
}
// If qhandle is not added into vread queue, the query should be completed already or paused with error.
// Here free qhandle immediately
if (freeHandle) {
int32_t remain = atomic_sub_fetch_32(&pVnode->numOfQHandle, 1);
vTrace("vgId:%d, QInfo:%p, start to free qhandle, remain qhandle:%d", pVnode->vgId, *handle, remain);
qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true);
}
return code;
#endif
return 0;
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "vnodeMain.h"
#include "vnodeWrite.h"
#include "vnodeWriteMsg.h"
typedef int32_t (*WriteMsgFp)(SVnode *, void *pCont, SVnRsp *);
typedef struct {
int32_t code;
int8_t qtype;
SVnode * pVnode;
SRpcMsg rpcMsg;
SVnRsp rspRet;
char reserveForSync[24];
SWalHead walHead;
} SVnWriteMsg;
static struct {
SWriteWorkerPool pool;
int64_t queuedBytes;
int32_t queuedMsgs;
} tsVwrite = {0};
void vnodeStartWrite(SVnode *pVnode) {}
void vnodeStopWrite(SVnode *pVnode) {}
void vnodeWaitWriteCompleted(SVnode *pVnode) {
while (pVnode->queuedWMsg > 0) {
vTrace("vgId:%d, queued wmsg num:%d", pVnode->vgId, pVnode->queuedWMsg);
taosMsleep(10);
}
}
static int32_t vnodeWriteToWQueue(SVnode *pVnode, SWalHead *pHead, int32_t qtype, SRpcMsg *pRpcMsg) {
if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) {
vWarn("vgId:%d, no write auth", pVnode->vgId);
return TSDB_CODE_VND_NO_WRITE_AUTH;
}
if (tsAvailDataDirGB <= tsMinimalDataDirGB) {
vWarn("vgId:%d, failed to write into vwqueue since no diskspace, avail:%fGB", pVnode->vgId, tsAvailDataDirGB);
return TSDB_CODE_VND_NO_DISKSPACE;
}
if (pHead->len > TSDB_MAX_WAL_SIZE) {
vError("vgId:%d, wal len:%d exceeds limit, hver:%" PRIu64, pVnode->vgId, pHead->len, pHead->version);
return TSDB_CODE_WAL_SIZE_LIMIT;
}
if (tsVwrite.queuedBytes > tsMaxVnodeQueuedBytes) {
vDebug("vgId:%d, too many bytes:%" PRId64 " in vwqueue, flow control", pVnode->vgId, tsVwrite.queuedBytes);
return TSDB_CODE_VND_IS_FLOWCTRL;
}
int32_t size = sizeof(SVnWriteMsg) + pHead->len;
SVnWriteMsg *pWrite = taosAllocateQitem(size);
if (pWrite == NULL) {
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
if (pRpcMsg != NULL) {
pWrite->rpcMsg = *pRpcMsg;
}
memcpy(&pWrite->walHead, pHead, sizeof(SWalHead) + pHead->len);
pWrite->pVnode = pVnode;
pWrite->qtype = qtype;
atomic_add_fetch_64(&tsVwrite.queuedBytes, size);
atomic_add_fetch_32(&tsVwrite.queuedMsgs, 1);
atomic_add_fetch_32(&pVnode->refCount, 1);
atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
taosWriteQitem(pVnode->pWriteQ, pWrite->qtype, pWrite);
return TSDB_CODE_SUCCESS;
}
static void vnodeFreeFromWQueue(SVnode *pVnode, SVnWriteMsg *pWrite) {
int64_t size = sizeof(SVnWriteMsg) + pWrite->walHead.len;
atomic_sub_fetch_64(&tsVwrite.queuedBytes, size);
atomic_sub_fetch_32(&tsVwrite.queuedMsgs, 1);
atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
taosFreeQitem(pWrite);
vnodeRelease(pVnode);
}
int32_t vnodeProcessWalMsg(SVnode *pVnode, SWalHead *pHead) {
return vnodeWriteToWQueue(pVnode, pHead, TAOS_QTYPE_WAL, NULL);
}
void vnodeProcessWriteMsg(SRpcMsg *pRpcMsg) {
int32_t code;
SMsgHead *pMsg = pRpcMsg->pCont;
pMsg->vgId = htonl(pMsg->vgId);
pMsg->contLen = htonl(pMsg->contLen);
SVnode *pVnode = vnodeAcquire(pMsg->vgId);
if (pVnode == NULL) {
code = TSDB_CODE_VND_INVALID_VGROUP_ID;
} else {
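// the SWalHead is built in place in the space just ahead of the RPC message body
// (assumed to be reserved by the rpc module), so the payload itself is not copied here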
SWalHead *pHead = (SWalHead *)((char *)pRpcMsg->pCont - sizeof(SWalHead));
pHead->msgType = pRpcMsg->msgType;
pHead->version = 0;
pHead->len = pMsg->contLen;
code = vnodeWriteToWQueue(pVnode, pHead, TAOS_QTYPE_RPC, pRpcMsg);
}
if (code != TSDB_CODE_SUCCESS) {
SRpcMsg rpcRsp = {.handle = pRpcMsg->handle, .code = code};
rpcSendResponse(&rpcRsp);
}
vnodeRelease(pVnode);
rpcFreeCont(pRpcMsg->pCont);
}
static bool vnodeProcessWriteStart(SVnode *pVnode, SVnWriteMsg *pWrite, int32_t qtype) {
SWalHead *pHead = &pWrite->walHead;
SVnRsp * pRet = &pWrite->rspRet;
int32_t msgType = pHead->msgType;
vTrace("vgId:%d, msg:%s will be processed, hver:%" PRIu64, pVnode->vgId, taosMsg[pHead->msgType], pHead->version);
// write into WAL
#if 0
pWrite->code = walWrite(pVnode->wal, pHead);
if (pWrite->code < 0) return false;
pVnode->version = pHead->version;
#endif
// write data locally
switch (msgType) {
case TSDB_MSG_TYPE_SUBMIT:
pRet->len = sizeof(SSubmitRsp);
pRet->rsp = rpcMallocCont(pRet->len);
pWrite->code = vnodeProcessSubmitReq(pVnode, (void*)pHead->cont, pRet->rsp);
break;
case TSDB_MSG_TYPE_MD_CREATE_TABLE:
pWrite->code = vnodeProcessCreateTableReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_MD_DROP_TABLE:
pWrite->code = vnodeProcessDropTableReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_MD_ALTER_TABLE:
pWrite->code = vnodeProcessAlterTableReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_MD_DROP_STABLE:
pWrite->code = vnodeProcessDropStableReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_UPDATE_TAG_VAL:
pWrite->code = vnodeProcessUpdateTagValReq(pVnode, (void*)pHead->cont, NULL);
break;
//mq related
case TSDB_MSG_TYPE_MQ_CONNECT:
pWrite->code = vnodeProcessMqConnectReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_MQ_DISCONNECT:
pWrite->code = vnodeProcessMqDisconnectReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_MQ_ACK:
pWrite->code = vnodeProcessMqAckReq(pVnode, (void*)pHead->cont, NULL);
break;
case TSDB_MSG_TYPE_MQ_RESET:
pWrite->code = vnodeProcessMqResetReq(pVnode, (void*)pHead->cont, NULL);
break;
//mq related end
default:
pWrite->code = TSDB_CODE_VND_MSG_NOT_PROCESSED;
break;
}
if (pWrite->code < 0) return false;
// request a WAL fsync only for successfully processed non-submit (metadata) messages
return (pWrite->code == 0 && msgType != TSDB_MSG_TYPE_SUBMIT);
}
static void vnodeFsync(SVnode *pVnode, bool fsync) {
#if 0
walFsync(pVnode->wal, fsync);
#endif
}
static void vnodeProcessWriteEnd(SVnode *pVnode, SVnWriteMsg *pWrite, int32_t qtype, int32_t code) {
if (qtype == TAOS_QTYPE_RPC) {
SRpcMsg rpcRsp = {
.handle = pWrite->rpcMsg.handle,
.pCont = pWrite->rspRet.rsp,
.contLen = pWrite->rspRet.len,
.code = pWrite->code,
};
rpcSendResponse(&rpcRsp);
} else {
if (pWrite->rspRet.rsp) {
rpcFreeCont(pWrite->rspRet.rsp);
}
}
vnodeFreeFromWQueue(pVnode, pWrite);
}
int32_t vnodeInitWrite() {
SWriteWorkerPool *pPool = &tsVwrite.pool;
pPool->name = "vwrite";
pPool->max = tsNumOfCores;
pPool->startFp = (ProcessWriteStartFp)vnodeProcessWriteStart;
pPool->syncFp = (ProcessWriteSyncFp)vnodeFsync;
pPool->endFp = (ProcessWriteEndFp)vnodeProcessWriteEnd;
if (tWriteWorkerInit(pPool) != 0) return -1;
vInfo("vwrite is initialized, max worker %d", pPool->max);
return TSDB_CODE_SUCCESS;
}
void vnodeCleanupWrite() {
tWriteWorkerCleanup(&tsVwrite.pool);
vInfo("vwrite is closed");
}
taos_queue vnodeAllocWriteQueue(SVnode *pVnode) { return tWriteWorkerAllocQueue(&tsVwrite.pool, pVnode); }
void vnodeFreeWriteQueue(taos_queue pQueue) { tWriteWorkerFreeQueue(&tsVwrite.pool, pQueue); }
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "vnodeWriteMsg.h"
int32_t vnodeProcessSubmitReq(SVnode *pVnode, SSubmitReq *pReq, SSubmitRsp *pRsp) {
// TODO: Check inputs
#if 0
void *pMem = NULL;
if ((pMem = amalloc(pVnode->allocator, REQ_SIZE(pReq))) == NULL) {
// No more memory to allocate, schedule an async commit
// and continue
vnodeAsyncCommit(pVnode);
// Reset the allocator and allocate again
vnodeResetAllocator(pVnode);
pMem = amalloc(pVnode->allocator, REQ_SIZE(pReq));
if (pMem == NULL) {
// TODO: handle the error
}
}
// TODO: if SSubmitReq is compressed or encoded, we need to decode the request
memcpy(pMem, pReq, REQ_SIZE(pReq));
if (tqPushMsg((SSubmitReq *)pReq) < 0) {
// TODO: handle error
}
SSubmitReqReader reader;
taosInitSubmitReqReader(&reader, (SSubmitReq *)pMem);
if (tsdbInsert(pVnode->pTsdb, (SSubmitReq *)pMem) < 0) {
// TODO: handle error
}
#endif
return 0;
}
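/* The disabled block above follows an allocate-or-commit-and-retry pattern: try the vnode
 * allocator first, and on failure trigger an async commit, reset the allocator, and retry once.
 * A self-contained toy sketch of the same pattern; the Arena type and helper names here are
 * illustrative only, not the vnode allocator API. */
#include <stddef.h>
#include <stdio.h>

typedef struct { char *buf; size_t cap; size_t used; } Arena;

static void *arenaAlloc(Arena *a, size_t n) {
  if (a->used + n > a->cap) return NULL;  // out of space
  void *p = a->buf + a->used;
  a->used += n;
  return p;
}

static void flushArena(Arena *a) {  // stands in for vnodeAsyncCommit + vnodeResetAllocator
  printf("committing %zu buffered bytes\n", a->used);
  a->used = 0;
}

// allocate-or-commit-and-retry, mirroring the #if 0 block above
static void *allocWithRetry(Arena *a, size_t n) {
  void *p = arenaAlloc(a, n);
  if (p == NULL) {
    flushArena(a);          // free buffered data by committing it
    p = arenaAlloc(a, n);   // retry once; NULL here means the single request is too large
  }
  return p;
}

int main(void) {
  char backing[64];
  Arena a = {backing, sizeof(backing), 0};
  for (int i = 0; i < 5; ++i) {
    printf("request %d -> %s\n", i, allocWithRetry(&a, 30) ? "ok" : "failed");
  }
  return 0;
}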
int32_t vnodeProcessCreateTableReq(SVnode *pVnode, SCreateTableReq *pReq, SCreateTableRsp *pRsp) {
// TODO
return 0;
}
int32_t vnodeProcessDropTableReq(SVnode *pVnode, SDropTableReq *pReq, SDropTableRsp *pRsp) {
// TODO
return 0;
}
int32_t vnodeProcessAlterTableReq(SVnode *pVnode, SAlterTableReq *pReq, SAlterTableRsp *pRsp) {
// TODO
return 0;
}
int32_t vnodeProcessDropStableReq(SVnode *pVnode, SDropStableReq *pReq, SDropStableRsp *pRsp) {
// TODO
return 0;
}
int32_t vnodeProcessUpdateTagValReq(SVnode *pVnode, SUpdateTagValReq *pReq, SUpdateTagValRsp *pRsp) {
// TODO
return 0;
}
//mq related
int32_t vnodeProcessMqConnectReq(SVnode* pVnode, SMqConnectReq *pReq, SMqConnectRsp *pRsp){
return 0;
}
int32_t vnodeProcessMqDisconnectReq(SVnode* pVnode, SMqConnectReq *pReq, SMqConnectRsp *pRsp) {
return 0;
}
int32_t vnodeProcessMqAckReq(SVnode* pVnode, SMqAckReq *pReq, SMqAckRsp *pRsp) {
return 0;
}
int32_t vnodeProcessMqResetReq(SVnode* pVnode, SMqResetReq *pReq, SMqResetRsp *pRsp) {
return 0;
}
//mq related end
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "tqueue.h" #include "tqueue.h"
typedef struct STaosQnode { typedef struct STaosQnode {
int type;
struct STaosQnode *next; struct STaosQnode *next;
char item[]; char item[];
} STaosQnode; } STaosQnode;
...@@ -32,6 +31,8 @@ typedef struct STaosQueue { ...@@ -32,6 +31,8 @@ typedef struct STaosQueue {
struct STaosQueue *next; // for queue set struct STaosQueue *next; // for queue set
struct STaosQset *qset; // for queue set struct STaosQset *qset; // for queue set
void *ahandle; // for queue set void *ahandle; // for queue set
FProcessItem itemFp;
FProcessItems itemsFp;
pthread_mutex_t mutex; pthread_mutex_t mutex;
} STaosQueue; } STaosQueue;
...@@ -52,8 +53,7 @@ typedef struct STaosQall { ...@@ -52,8 +53,7 @@ typedef struct STaosQall {
} STaosQall; } STaosQall;
taos_queue taosOpenQueue() { taos_queue taosOpenQueue() {
STaosQueue *queue = (STaosQueue *)calloc(sizeof(STaosQueue), 1);
STaosQueue *queue = (STaosQueue *) calloc(sizeof(STaosQueue), 1);
if (queue == NULL) { if (queue == NULL) {
terrno = TSDB_CODE_COM_OUT_OF_MEMORY; terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
return NULL; return NULL;
...@@ -65,6 +65,13 @@ taos_queue taosOpenQueue() { ...@@ -65,6 +65,13 @@ taos_queue taosOpenQueue() {
return queue; return queue;
} }
void taosSetQueueFp(taos_queue param, FProcessItem itemFp, FProcessItems itemsFp) {
if (param == NULL) return;
STaosQueue *queue = (STaosQueue *)param;
queue->itemFp = itemFp;
queue->itemsFp = itemsFp;
}
void taosCloseQueue(taos_queue param) { void taosCloseQueue(taos_queue param) {
if (param == NULL) return; if (param == NULL) return;
STaosQueue *queue = (STaosQueue *)param; STaosQueue *queue = (STaosQueue *)param;
...@@ -82,7 +89,7 @@ void taosCloseQueue(taos_queue param) { ...@@ -82,7 +89,7 @@ void taosCloseQueue(taos_queue param) {
while (pNode) { while (pNode) {
pTemp = pNode; pTemp = pNode;
pNode = pNode->next; pNode = pNode->next;
free (pTemp); free(pTemp);
} }
pthread_mutex_destroy(&queue->mutex); pthread_mutex_destroy(&queue->mutex);
...@@ -108,10 +115,9 @@ void taosFreeQitem(void *param) { ...@@ -108,10 +115,9 @@ void taosFreeQitem(void *param) {
free(temp); free(temp);
} }
int taosWriteQitem(taos_queue param, int type, void *item) { int taosWriteQitem(taos_queue param, void *item) {
STaosQueue *queue = (STaosQueue *)param; STaosQueue *queue = (STaosQueue *)param;
STaosQnode *pNode = (STaosQnode *)(((char *)item) - sizeof(STaosQnode)); STaosQnode *pNode = (STaosQnode *)(((char *)item) - sizeof(STaosQnode));
pNode->type = type;
pNode->next = NULL; pNode->next = NULL;
pthread_mutex_lock(&queue->mutex); pthread_mutex_lock(&queue->mutex);
...@@ -126,7 +132,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) { ...@@ -126,7 +132,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) {
queue->numOfItems++; queue->numOfItems++;
if (queue->qset) atomic_add_fetch_32(&queue->qset->numOfItems, 1); if (queue->qset) atomic_add_fetch_32(&queue->qset->numOfItems, 1);
uTrace("item:%p is put into queue:%p, type:%d items:%d", item, queue, type, queue->numOfItems); uTrace("item:%p is put into queue:%p, items:%d", item, queue, queue->numOfItems);
pthread_mutex_unlock(&queue->mutex); pthread_mutex_unlock(&queue->mutex);
...@@ -135,7 +141,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) { ...@@ -135,7 +141,7 @@ int taosWriteQitem(taos_queue param, int type, void *item) {
return 0; return 0;
} }
int taosReadQitem(taos_queue param, int *type, void **pitem) { int taosReadQitem(taos_queue param, void **pitem) {
STaosQueue *queue = (STaosQueue *)param; STaosQueue *queue = (STaosQueue *)param;
STaosQnode *pNode = NULL; STaosQnode *pNode = NULL;
int code = 0; int code = 0;
...@@ -145,14 +151,12 @@ int taosReadQitem(taos_queue param, int *type, void **pitem) { ...@@ -145,14 +151,12 @@ int taosReadQitem(taos_queue param, int *type, void **pitem) {
if (queue->head) { if (queue->head) {
pNode = queue->head; pNode = queue->head;
*pitem = pNode->item; *pitem = pNode->item;
*type = pNode->type;
queue->head = pNode->next; queue->head = pNode->next;
if (queue->head == NULL) if (queue->head == NULL) queue->tail = NULL;
queue->tail = NULL;
queue->numOfItems--; queue->numOfItems--;
if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, 1); if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, 1);
code = 1; code = 1;
uDebug("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems); uDebug("item:%p is read out from queue:%p, items:%d", *pitem, queue, queue->numOfItems);
} }
pthread_mutex_unlock(&queue->mutex); pthread_mutex_unlock(&queue->mutex);
...@@ -165,9 +169,7 @@ void *taosAllocateQall() { ...@@ -165,9 +169,7 @@ void *taosAllocateQall() {
return p; return p;
} }
void taosFreeQall(void *param) { void taosFreeQall(void *param) { free(param); }
free(param);
}
int taosReadAllQitems(taos_queue param, taos_qall p2) { int taosReadAllQitems(taos_queue param, taos_qall p2) {
STaosQueue *queue = (STaosQueue *)param; STaosQueue *queue = (STaosQueue *)param;
...@@ -203,20 +205,18 @@ int taosReadAllQitems(taos_queue param, taos_qall p2) { ...@@ -203,20 +205,18 @@ int taosReadAllQitems(taos_queue param, taos_qall p2) {
return code; return code;
} }
int taosGetQitem(taos_qall param, int *type, void **pitem) { int taosGetQitem(taos_qall param, void **pitem) {
STaosQall *qall = (STaosQall *)param; STaosQall *qall = (STaosQall *)param;
STaosQnode *pNode; STaosQnode *pNode;
int num = 0; int num = 0;
pNode = qall->current; pNode = qall->current;
if (pNode) if (pNode) qall->current = pNode->next;
qall->current = pNode->next;
if (pNode) { if (pNode) {
*pitem = pNode->item; *pitem = pNode->item;
*type = pNode->type;
num = 1; num = 1;
uTrace("item:%p is fetched, type:%d", *pitem, *type); uTrace("item:%p is fetched", *pitem);
} }
return num; return num;
...@@ -228,8 +228,7 @@ void taosResetQitems(taos_qall param) { ...@@ -228,8 +228,7 @@ void taosResetQitems(taos_qall param) {
} }
taos_qset taosOpenQset() { taos_qset taosOpenQset() {
STaosQset *qset = (STaosQset *)calloc(sizeof(STaosQset), 1);
STaosQset *qset = (STaosQset *) calloc(sizeof(STaosQset), 1);
if (qset == NULL) { if (qset == NULL) {
terrno = TSDB_CODE_COM_OUT_OF_MEMORY; terrno = TSDB_CODE_COM_OUT_OF_MEMORY;
return NULL; return NULL;
...@@ -313,7 +312,7 @@ void taosRemoveFromQset(taos_qset p1, taos_queue p2) { ...@@ -313,7 +312,7 @@ void taosRemoveFromQset(taos_qset p1, taos_queue p2) {
tqueue = qset->head->next; tqueue = qset->head->next;
while (tqueue) { while (tqueue) {
assert(tqueue->qset); assert(tqueue->qset);
if (tqueue== queue) { if (tqueue == queue) {
prev->next = tqueue->next; prev->next = tqueue->next;
break; break;
} else { } else {
...@@ -340,11 +339,9 @@ void taosRemoveFromQset(taos_qset p1, taos_queue p2) { ...@@ -340,11 +339,9 @@ void taosRemoveFromQset(taos_qset p1, taos_queue p2) {
uTrace("queue:%p is removed from qset:%p", queue, qset); uTrace("queue:%p is removed from qset:%p", queue, qset);
} }
int taosGetQueueNumber(taos_qset param) { int taosGetQueueNumber(taos_qset param) { return ((STaosQset *)param)->numOfQueues; }
return ((STaosQset *)param)->numOfQueues;
}
int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phandle) { int taosReadQitemFromQset(taos_qset param, void **pitem, void **ahandle, FProcessItem *itemFp) {
STaosQset *qset = (STaosQset *)param; STaosQset *qset = (STaosQset *)param;
STaosQnode *pNode = NULL; STaosQnode *pNode = NULL;
int code = 0; int code = 0;
...@@ -353,9 +350,8 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand ...@@ -353,9 +350,8 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand
pthread_mutex_lock(&qset->mutex); pthread_mutex_lock(&qset->mutex);
for(int i=0; i<qset->numOfQueues; ++i) { for (int i = 0; i < qset->numOfQueues; ++i) {
if (qset->current == NULL) if (qset->current == NULL) qset->current = qset->head;
qset->current = qset->head;
STaosQueue *queue = qset->current; STaosQueue *queue = qset->current;
if (queue) qset->current = queue->next; if (queue) qset->current = queue->next;
if (queue == NULL) break; if (queue == NULL) break;
...@@ -366,15 +362,14 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand ...@@ -366,15 +362,14 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand
if (queue->head) { if (queue->head) {
pNode = queue->head; pNode = queue->head;
*pitem = pNode->item; *pitem = pNode->item;
if (type) *type = pNode->type; if (ahandle) *ahandle = queue->ahandle;
if (phandle) *phandle = queue->ahandle; if (itemFp) *itemFp = queue->itemFp;
queue->head = pNode->next; queue->head = pNode->next;
if (queue->head == NULL) if (queue->head == NULL) queue->tail = NULL;
queue->tail = NULL;
queue->numOfItems--; queue->numOfItems--;
atomic_sub_fetch_32(&qset->numOfItems, 1); atomic_sub_fetch_32(&qset->numOfItems, 1);
code = 1; code = 1;
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, pNode->type, queue->numOfItems); uTrace("item:%p is read out from queue:%p, items:%d", *pitem, queue, queue->numOfItems);
} }
pthread_mutex_unlock(&queue->mutex); pthread_mutex_unlock(&queue->mutex);
...@@ -386,7 +381,7 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand ...@@ -386,7 +381,7 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand
return code; return code;
} }
int taosReadAllQitemsFromQset(taos_qset param, taos_qall p2, void **phandle) { int taosReadAllQitemsFromQset(taos_qset param, taos_qall p2, void **ahandle, FProcessItems *itemsFp) {
STaosQset *qset = (STaosQset *)param; STaosQset *qset = (STaosQset *)param;
STaosQueue *queue; STaosQueue *queue;
STaosQall *qall = (STaosQall *)p2; STaosQall *qall = (STaosQall *)p2;
...@@ -411,7 +406,8 @@ int taosReadAllQitemsFromQset(taos_qset param, taos_qall p2, void **phandle) { ...@@ -411,7 +406,8 @@ int taosReadAllQitemsFromQset(taos_qset param, taos_qall p2, void **phandle) {
qall->numOfItems = queue->numOfItems; qall->numOfItems = queue->numOfItems;
qall->itemSize = queue->itemSize; qall->itemSize = queue->itemSize;
code = qall->numOfItems; code = qall->numOfItems;
*phandle = queue->ahandle; if (ahandle) *ahandle = queue->ahandle;
if (itemsFp) *itemsFp = queue->itemsFp;
queue->head = NULL; queue->head = NULL;
queue->tail = NULL; queue->tail = NULL;
......
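/* A minimal usage sketch of the reworked queue API above, assuming FProcessItem is invoked
 * as fp(item, ahandle) exactly as the call site in tworker.c below does; signatures are
 * taken from this diff, everything else is illustrative. */
#if 0
static void processOneItem(void *item, void *ahandle) {
  // ahandle is whatever was registered with taosAddIntoQset (e.g. an SVnode *)
  (void)item;
  (void)ahandle;
}

static void queueUsageSketch() {
  taos_qset  qset  = taosOpenQset();
  taos_queue queue = taosOpenQueue();

  taosSetQueueFp(queue, processOneItem, NULL);  // per-item callback instead of a qtype tag
  taosAddIntoQset(qset, queue, NULL);

  void *item = taosAllocateQitem(128);
  taosWriteQitem(queue, item);                  // note: no qtype argument any more

  void        *msg = NULL, *ahandle = NULL;
  FProcessItem fp  = NULL;
  if (taosReadQitemFromQset(qset, &msg, &ahandle, &fp) > 0 && fp != NULL) {
    (*fp)(msg, ahandle);                        // dispatch through the queue's own callback
    taosFreeQitem(msg);
  }

  taosCloseQueue(queue);
}
#endif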
...@@ -59,10 +59,10 @@ void tWorkerCleanup(SWorkerPool *pool) { ...@@ -59,10 +59,10 @@ void tWorkerCleanup(SWorkerPool *pool) {
static void *tWorkerThreadFp(SWorker *worker) { static void *tWorkerThreadFp(SWorker *worker) {
SWorkerPool *pool = worker->pool; SWorkerPool *pool = worker->pool;
FProcessItem fp = NULL;
void * msg = NULL; void *msg = NULL;
void * ahandle = NULL; void *ahandle = NULL;
int32_t qtype = 0;
int32_t code = 0; int32_t code = 0;
taosBlockSIGPIPE(); taosBlockSIGPIPE();
...@@ -70,19 +70,20 @@ static void *tWorkerThreadFp(SWorker *worker) { ...@@ -70,19 +70,20 @@ static void *tWorkerThreadFp(SWorker *worker) {
uDebug("worker:%s:%d is running", pool->name, worker->id); uDebug("worker:%s:%d is running", pool->name, worker->id);
while (1) { while (1) {
if (taosReadQitemFromQset(pool->qset, &qtype, (void **)&msg, &ahandle) == 0) { if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset); uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
break; break;
} }
code = (*pool->startFp)(ahandle, msg, qtype); if (fp) {
if (pool->endFp) (*pool->endFp)(ahandle, msg, qtype, code); (*fp)(msg, ahandle);
}
} }
return NULL; return NULL;
} }
taos_queue tWorkerAllocQueue(SWorkerPool *pool, void *ahandle) { taos_queue tWorkerAllocQueue(SWorkerPool *pool, void *ahandle, FProcessItem fp) {
pthread_mutex_lock(&pool->mutex); pthread_mutex_lock(&pool->mutex);
taos_queue queue = taosOpenQueue(); taos_queue queue = taosOpenQueue();
if (queue == NULL) { if (queue == NULL) {
...@@ -90,6 +91,7 @@ taos_queue tWorkerAllocQueue(SWorkerPool *pool, void *ahandle) { ...@@ -90,6 +91,7 @@ taos_queue tWorkerAllocQueue(SWorkerPool *pool, void *ahandle) {
return NULL; return NULL;
} }
taosSetQueueFp(queue, fp, NULL);
taosAddIntoQset(pool->qset, queue, ahandle); taosAddIntoQset(pool->qset, queue, ahandle);
// spawn a thread to process queue // spawn a thread to process queue
...@@ -122,14 +124,14 @@ void tWorkerFreeQueue(SWorkerPool *pool, void *queue) { ...@@ -122,14 +124,14 @@ void tWorkerFreeQueue(SWorkerPool *pool, void *queue) {
uDebug("worker:%s, queue:%p is freed", pool->name, queue); uDebug("worker:%s, queue:%p is freed", pool->name, queue);
} }
int32_t tWriteWorkerInit(SWriteWorkerPool *pool) { int32_t tMWorkerInit(SMWorkerPool *pool) {
pool->nextId = 0; pool->nextId = 0;
pool->workers = calloc(sizeof(SWriteWorker), pool->max); pool->workers = calloc(sizeof(SMWorker), pool->max);
if (pool->workers == NULL) return -1; if (pool->workers == NULL) return -1;
pthread_mutex_init(&pool->mutex, NULL); pthread_mutex_init(&pool->mutex, NULL);
for (int32_t i = 0; i < pool->max; ++i) { for (int32_t i = 0; i < pool->max; ++i) {
SWriteWorker *worker = pool->workers + i; SMWorker *worker = pool->workers + i;
worker->id = i; worker->id = i;
worker->qall = NULL; worker->qall = NULL;
worker->qset = NULL; worker->qset = NULL;
...@@ -140,16 +142,16 @@ int32_t tWriteWorkerInit(SWriteWorkerPool *pool) { ...@@ -140,16 +142,16 @@ int32_t tWriteWorkerInit(SWriteWorkerPool *pool) {
return 0; return 0;
} }
void tWriteWorkerCleanup(SWriteWorkerPool *pool) { void tMWorkerCleanup(SMWorkerPool *pool) {
for (int32_t i = 0; i < pool->max; ++i) { for (int32_t i = 0; i < pool->max; ++i) {
SWriteWorker *worker = pool->workers + i; SMWorker *worker = pool->workers + i;
if (taosCheckPthreadValid(worker->thread)) { if (taosCheckPthreadValid(worker->thread)) {
if (worker->qset) taosQsetThreadResume(worker->qset); if (worker->qset) taosQsetThreadResume(worker->qset);
} }
} }
for (int32_t i = 0; i < pool->max; ++i) { for (int32_t i = 0; i < pool->max; ++i) {
SWriteWorker *worker = pool->workers + i; SMWorker *worker = pool->workers + i;
if (taosCheckPthreadValid(worker->thread)) { if (taosCheckPthreadValid(worker->thread)) {
pthread_join(worker->thread, NULL); pthread_join(worker->thread, NULL);
taosFreeQall(worker->qall); taosFreeQall(worker->qall);
...@@ -163,11 +165,12 @@ void tWriteWorkerCleanup(SWriteWorkerPool *pool) { ...@@ -163,11 +165,12 @@ void tWriteWorkerCleanup(SWriteWorkerPool *pool) {
uInfo("worker:%s is closed", pool->name); uInfo("worker:%s is closed", pool->name);
} }
static void *tWriteWorkerThreadFp(SWriteWorker *worker) { static void *tWriteWorkerThreadFp(SMWorker *worker) {
SWriteWorkerPool *pool = worker->pool; SMWorkerPool *pool = worker->pool;
FProcessItems fp = NULL;
void * msg = NULL; void *msg = NULL;
void * ahandle = NULL; void *ahandle = NULL;
int32_t numOfMsgs = 0; int32_t numOfMsgs = 0;
int32_t qtype = 0; int32_t qtype = 0;
...@@ -176,34 +179,23 @@ static void *tWriteWorkerThreadFp(SWriteWorker *worker) { ...@@ -176,34 +179,23 @@ static void *tWriteWorkerThreadFp(SWriteWorker *worker) {
uDebug("worker:%s:%d is running", pool->name, worker->id); uDebug("worker:%s:%d is running", pool->name, worker->id);
while (1) { while (1) {
numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &ahandle); numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &ahandle, &fp);
if (numOfMsgs == 0) { if (numOfMsgs == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset); uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset);
break; break;
} }
bool fsync = false; if (fp) {
for (int32_t i = 0; i < numOfMsgs; ++i) { (*fp)(worker->qall, numOfMsgs, ahandle);
taosGetQitem(worker->qall, &qtype, (void **)&msg);
fsync = fsync | (*pool->startFp)(ahandle, msg, qtype);
}
(*pool->syncFp)(ahandle, fsync);
// browse all items, and process them one by one
taosResetQitems(worker->qall);
for (int32_t i = 0; i < numOfMsgs; ++i) {
taosGetQitem(worker->qall, &qtype, (void **)&msg);
(*pool->endFp)(ahandle, msg, qtype);
} }
} }
return NULL; return NULL;
} }
taos_queue tWriteWorkerAllocQueue(SWriteWorkerPool *pool, void *ahandle) { taos_queue tMWorkerAllocQueue(SMWorkerPool *pool, void *ahandle, FProcessItems fp) {
pthread_mutex_lock(&pool->mutex); pthread_mutex_lock(&pool->mutex);
SWriteWorker *worker = pool->workers + pool->nextId; SMWorker *worker = pool->workers + pool->nextId;
taos_queue *queue = taosOpenQueue(); taos_queue *queue = taosOpenQueue();
if (queue == NULL) { if (queue == NULL) {
...@@ -211,6 +203,8 @@ taos_queue tWriteWorkerAllocQueue(SWriteWorkerPool *pool, void *ahandle) { ...@@ -211,6 +203,8 @@ taos_queue tWriteWorkerAllocQueue(SWriteWorkerPool *pool, void *ahandle) {
return NULL; return NULL;
} }
taosSetQueueFp(queue, NULL, fp);
if (worker->qset == NULL) { if (worker->qset == NULL) {
worker->qset = taosOpenQset(); worker->qset = taosOpenQset();
if (worker->qset == NULL) { if (worker->qset == NULL) {
...@@ -254,7 +248,7 @@ taos_queue tWriteWorkerAllocQueue(SWriteWorkerPool *pool, void *ahandle) { ...@@ -254,7 +248,7 @@ taos_queue tWriteWorkerAllocQueue(SWriteWorkerPool *pool, void *ahandle) {
return queue; return queue;
} }
void tWriteWorkerFreeQueue(SWriteWorkerPool *pool, taos_queue queue) { void tMWorkerFreeQueue(SMWorkerPool *pool, taos_queue queue) {
taosCloseQueue(queue); taosCloseQueue(queue);
uDebug("worker:%s, queue:%p is freed", pool->name, queue); uDebug("worker:%s, queue:%p is freed", pool->name, queue);
} }
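/* A sketch of a batch handler for the renamed multi-worker pool, assuming the FProcessItems
 * signature implied by the call (*fp)(worker->qall, numOfMsgs, ahandle) above and the new
 * taosGetQitem(qall, &item) form; freeing each item in the handler mirrors what the old
 * endFp path did, but is an assumption here. */
#if 0
static void processBatch(taos_qall qall, int32_t numOfMsgs, void *ahandle) {
  void *msg = NULL;
  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, &msg) == 0) break;
    // handle one queued message for this owner (ahandle), then free it
    taosFreeQitem(msg);
  }
}

static void mworkerUsageSketch(SMWorkerPool *pool, void *owner) {
  // messages later written to this queue are delivered to processBatch() in batches
  taos_queue queue = tMWorkerAllocQueue(pool, owner, processBatch);
  (void)queue;
}
#endif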
...@@ -1166,7 +1166,7 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1166,7 +1166,7 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCreateDbMsg); pCmd->payloadLen = sizeof(SCreateDbMsg);
pCmd->msgType = (pInfo->pMiscInfo->dbOpt.dbType == TSDB_DB_TYPE_DEFAULT) ? TSDB_MSG_TYPE_CM_CREATE_DB : TSDB_MSG_TYPE_CM_CREATE_TP; pCmd->msgType = (pInfo->pMiscInfo->dbOpt.dbType == TSDB_DB_TYPE_DEFAULT) ? TSDB_MSG_TYPE_CREATE_DB : TSDB_MSG_TYPE_CREATE_TP;
SCreateDbMsg *pCreateDbMsg = (SCreateDbMsg *)pCmd->payload; SCreateDbMsg *pCreateDbMsg = (SCreateDbMsg *)pCmd->payload;
...@@ -1182,7 +1182,7 @@ int32_t tscBuildCreateFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1182,7 +1182,7 @@ int32_t tscBuildCreateFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
SCreateFuncMsg *pCreateFuncMsg = (SCreateFuncMsg *)pCmd->payload; SCreateFuncMsg *pCreateFuncMsg = (SCreateFuncMsg *)pCmd->payload;
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_FUNCTION; pCmd->msgType = TSDB_MSG_TYPE_CREATE_FUNCTION;
pCmd->payloadLen = sizeof(SCreateFuncMsg) + htonl(pCreateFuncMsg->codeLen); pCmd->payloadLen = sizeof(SCreateFuncMsg) + htonl(pCreateFuncMsg->codeLen);
...@@ -1203,7 +1203,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1203,7 +1203,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0); SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0);
strncpy(pCreate->ep, t0->z, t0->n); strncpy(pCreate->ep, t0->z, t0->n);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE; pCmd->msgType = TSDB_MSG_TYPE_CREATE_DNODE;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1249,7 +1249,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1249,7 +1249,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} }
} }
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_ACCT; pCmd->msgType = TSDB_MSG_TYPE_CREATE_ACCT;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1277,9 +1277,9 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1277,9 +1277,9 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} }
if (pUser->type == TSDB_ALTER_USER_PASSWD || pUser->type == TSDB_ALTER_USER_PRIVILEGES) { if (pUser->type == TSDB_ALTER_USER_PASSWD || pUser->type == TSDB_ALTER_USER_PRIVILEGES) {
pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_USER; pCmd->msgType = TSDB_MSG_TYPE_ALTER_USER;
} else { } else {
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_USER; pCmd->msgType = TSDB_MSG_TYPE_CREATE_USER;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -1288,7 +1288,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1288,7 +1288,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCfgDnodeMsg); pCmd->payloadLen = sizeof(SCfgDnodeMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_CONFIG_DNODE; pCmd->msgType = TSDB_MSG_TYPE_CONFIG_DNODE;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1310,14 +1310,14 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1310,14 +1310,14 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pDropDbMsg->ignoreNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0; pDropDbMsg->ignoreNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = (pInfo->pMiscInfo->dbType == TSDB_DB_TYPE_DEFAULT) ? TSDB_MSG_TYPE_CM_DROP_DB : TSDB_MSG_TYPE_CM_DROP_TP; pCmd->msgType = (pInfo->pMiscInfo->dbType == TSDB_DB_TYPE_DEFAULT) ? TSDB_MSG_TYPE_DROP_DB : TSDB_MSG_TYPE_DROP_TP;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t tscBuildDropFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildDropFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_FUNCTION; pCmd->msgType = TSDB_MSG_TYPE_DROP_FUNCTION;
pCmd->payloadLen = sizeof(SDropFuncMsg); pCmd->payloadLen = sizeof(SDropFuncMsg);
...@@ -1340,7 +1340,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1340,7 +1340,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pDropTableMsg->supertable = (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE)? 1:0; pDropTableMsg->supertable = (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE)? 1:0;
pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0; pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE; pCmd->msgType = TSDB_MSG_TYPE_DROP_TABLE;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1358,7 +1358,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1358,7 +1358,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload; SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload;
tstrncpy(pDrop->ep, dnodeEp, tListLen(pDrop->ep)); tstrncpy(pDrop->ep, dnodeEp, tListLen(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE; pCmd->msgType = TSDB_MSG_TYPE_DROP_DNODE;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1370,7 +1370,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1370,7 +1370,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
tstrncpy(user, pCmd->payload, TSDB_USER_LEN); tstrncpy(user, pCmd->payload, TSDB_USER_LEN);
pCmd->payloadLen = sizeof(SDropUserMsg); pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT; pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_DROP_USER:TSDB_MSG_TYPE_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
...@@ -1395,7 +1395,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1395,7 +1395,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload; SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db); tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB; pCmd->msgType = TSDB_MSG_TYPE_USE_DB;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1412,14 +1412,14 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) { ...@@ -1412,14 +1412,14 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload; SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db); tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_SYNC_DB; pCmd->msgType = TSDB_MSG_TYPE_SYNC_DB;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_SHOW; pCmd->msgType = TSDB_MSG_TYPE_SHOW;
pCmd->payloadLen = sizeof(SShowMsg) + 100; pCmd->payloadLen = sizeof(SShowMsg) + 100;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
...@@ -1473,13 +1473,13 @@ int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1473,13 +1473,13 @@ int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
switch (pCmd->command) { switch (pCmd->command) {
case TSDB_SQL_KILL_QUERY: case TSDB_SQL_KILL_QUERY:
pCmd->msgType = TSDB_MSG_TYPE_CM_KILL_QUERY; pCmd->msgType = TSDB_MSG_TYPE_KILL_QUERY;
break; break;
case TSDB_SQL_KILL_CONNECTION: case TSDB_SQL_KILL_CONNECTION:
pCmd->msgType = TSDB_MSG_TYPE_CM_KILL_CONN; pCmd->msgType = TSDB_MSG_TYPE_KILL_CONN;
break; break;
case TSDB_SQL_KILL_STREAM: case TSDB_SQL_KILL_STREAM:
pCmd->msgType = TSDB_MSG_TYPE_CM_KILL_STREAM; pCmd->msgType = TSDB_MSG_TYPE_KILL_STREAM;
break; break;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -1592,7 +1592,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1592,7 +1592,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
msgLen = (int32_t)(pMsg - (char*)pCreateTableMsg); msgLen = (int32_t)(pMsg - (char*)pCreateTableMsg);
pCreateTableMsg->contLen = htonl(msgLen); pCreateTableMsg->contLen = htonl(msgLen);
pCmd->payloadLen = msgLen; pCmd->payloadLen = msgLen;
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE; pCmd->msgType = TSDB_MSG_TYPE_CREATE_TABLE;
assert(msgLen + minMsgSize() <= size); assert(msgLen + minMsgSize() <= size);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -1645,7 +1645,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1645,7 +1645,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg); msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);
pCmd->payloadLen = msgLen; pCmd->payloadLen = msgLen;
pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_TABLE; pCmd->msgType = TSDB_MSG_TYPE_ALTER_TABLE;
assert(msgLen + minMsgSize() <= size); assert(msgLen + minMsgSize() <= size);
...@@ -1674,7 +1674,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { ...@@ -1674,7 +1674,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SAlterDbMsg); pCmd->payloadLen = sizeof(SAlterDbMsg);
pCmd->msgType = (pInfo->pMiscInfo->dbOpt.dbType == TSDB_DB_TYPE_DEFAULT) ? TSDB_MSG_TYPE_CM_ALTER_DB : TSDB_MSG_TYPE_CM_ALTER_TP; pCmd->msgType = (pInfo->pMiscInfo->dbOpt.dbType == TSDB_DB_TYPE_DEFAULT) ? TSDB_MSG_TYPE_ALTER_DB : TSDB_MSG_TYPE_ALTER_TP;
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload; SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
pAlterDbMsg->dbType = -1; pAlterDbMsg->dbType = -1;
...@@ -1711,7 +1711,7 @@ int tscBuildCompactMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1711,7 +1711,7 @@ int tscBuildCompactMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int count = removeDupVgid(result, size); int count = removeDupVgid(result, size);
pCmd->payloadLen = sizeof(SCompactMsg) + count * sizeof(int32_t); pCmd->payloadLen = sizeof(SCompactMsg) + count * sizeof(int32_t);
pCmd->msgType = TSDB_MSG_TYPE_CM_COMPACT_VNODE; pCmd->msgType = TSDB_MSG_TYPE_COMPACT_VNODE;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self); tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
...@@ -1741,7 +1741,7 @@ int tscBuildCompactMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1741,7 +1741,7 @@ int tscBuildCompactMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_RETRIEVE; pCmd->msgType = TSDB_MSG_TYPE_SHOW_RETRIEVE;
pCmd->payloadLen = sizeof(SRetrieveTableMsg); pCmd->payloadLen = sizeof(SRetrieveTableMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
...@@ -1861,7 +1861,7 @@ int tscProcessEmptyResultRsp(SSqlObj *pSql) { return tscLocalResultCommonBuilder ...@@ -1861,7 +1861,7 @@ int tscProcessEmptyResultRsp(SSqlObj *pSql) { return tscLocalResultCommonBuilder
int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STscObj *pObj = pSql->pTscObj; STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_CONNECT; pCmd->msgType = TSDB_MSG_TYPE_CONNECT;
pCmd->payloadLen = sizeof(SConnectMsg); pCmd->payloadLen = sizeof(SConnectMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
...@@ -1898,7 +1898,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1898,7 +1898,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int tscBuildMultiTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscBuildMultiTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META; pCmd->msgType = TSDB_MSG_TYPE_TABLES_META;
assert(pCmd->payloadLen + minMsgSize() <= pCmd->allocSize); assert(pCmd->payloadLen + minMsgSize() <= pCmd->allocSize);
tscDebug("0x%"PRIx64" build load multi-tablemeta msg completed, numOfTables:%d, msg size:%d", pSql->self, pCmd->count, tscDebug("0x%"PRIx64" build load multi-tablemeta msg completed, numOfTables:%d, msg size:%d", pSql->self, pCmd->count,
...@@ -1925,7 +1925,7 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1925,7 +1925,7 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += TSDB_TABLE_FNAME_LEN; pMsg += TSDB_TABLE_FNAME_LEN;
} }
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP; pCmd->msgType = TSDB_MSG_TYPE_STABLE_VGROUP;
pCmd->payloadLen = (int32_t)(pMsg - pCmd->payload); pCmd->payloadLen = (int32_t)(pMsg - pCmd->payload);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -1948,7 +1948,7 @@ int tscBuildRetrieveFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1948,7 +1948,7 @@ int tscBuildRetrieveFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += varDataNetTLen(pMsg); pMsg += varDataNetTLen(pMsg);
} }
pCmd->msgType = TSDB_MSG_TYPE_CM_RETRIEVE_FUNC; pCmd->msgType = TSDB_MSG_TYPE_RETRIEVE_FUNC;
pCmd->payloadLen = (int32_t)(pMsg - pCmd->payload); pCmd->payloadLen = (int32_t)(pMsg - pCmd->payload);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -1996,7 +1996,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1996,7 +1996,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pthread_mutex_unlock(&pObj->mutex); pthread_mutex_unlock(&pObj->mutex);
pCmd->payloadLen = msgLen; pCmd->payloadLen = msgLen;
pCmd->msgType = TSDB_MSG_TYPE_CM_HEARTBEAT; pCmd->msgType = TSDB_MSG_TYPE_HEARTBEAT;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -2838,7 +2838,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn ...@@ -2838,7 +2838,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
} }
pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg); pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
pNew->cmd.msgType = TSDB_MSG_TYPE_CM_TABLE_META; pNew->cmd.msgType = TSDB_MSG_TYPE_TABLE_META;
} }
int32_t code = tscBuildAndSendRequest(pNew, NULL); int32_t code = tscBuildAndSendRequest(pNew, NULL);
...@@ -2913,7 +2913,7 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg ...@@ -2913,7 +2913,7 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
} }
pNew->cmd.payloadLen = (int32_t) ((start - pInfo->tableNames) + sizeof(SMultiTableInfoMsg)); pNew->cmd.payloadLen = (int32_t) ((start - pInfo->tableNames) + sizeof(SMultiTableInfoMsg));
pNew->cmd.msgType = TSDB_MSG_TYPE_CM_TABLES_META; pNew->cmd.msgType = TSDB_MSG_TYPE_TABLES_META;
registerSqlObj(pNew); registerSqlObj(pNew);
tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get %d tableMeta, vgroupInfo:%d, udf:%d, msg size:%d", pSql->self, tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get %d tableMeta, vgroupInfo:%d, udf:%d, msg size:%d", pSql->self,
......
...@@ -5011,7 +5011,7 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt ...@@ -5011,7 +5011,7 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->command = TSDB_SQL_MULTI_META; pCmd->command = TSDB_SQL_MULTI_META;
pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META; pCmd->msgType = TSDB_MSG_TYPE_TABLES_META;
int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
char *str = (char *)pNameList; char *str = (char *)pNameList;
......