Unverified · Commit 40dd568d · Authored by: Xiaoyu Wang · Committed by: GitHub

Merge pull request #10678 from taosdata/feature/3.0_query_integrate_wxy

TD-13747 bugfix
@@ -77,7 +77,9 @@ const int32_t funcMgtBuiltinsNum = (sizeof(funcMgtBuiltins) / sizeof(SBuiltinFun
int32_t stubCheckAndGetResultType(SFunctionNode* pFunc) {
switch(pFunc->funcType) {
case FUNCTION_TYPE_COUNT: pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};break;
case FUNCTION_TYPE_COUNT:
pFunc->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
break;
case FUNCTION_TYPE_SUM: {
SColumnNode* pParam = nodesListGetNode(pFunc->pParameterList, 0);
int32_t paraType = pParam->node.resType.type;
@@ -103,6 +105,9 @@ int32_t stubCheckAndGetResultType(SFunctionNode* pFunc) {
pFunc->node.resType = (SDataType) { .bytes = tDataTypes[paraType].bytes, .type = paraType };
break;
}
case FUNCTION_TYPE_CONCAT:
// todo
break;
default:
ASSERT(0); // to find the fault ASAP.
}
......
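For context, the hunk above extends a result-type resolver: a switch over the builtin function type that fills in a (bytes, type) descriptor for the expression node. Below is a minimal, self-contained sketch of that pattern under simplified, illustrative names; FuncType, DataType and resolve_result_type are stand-ins, not the TDengine API.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for SFunctionNode / SDataType, used only in this sketch. */
typedef enum { FUNC_COUNT, FUNC_SUM, FUNC_CONCAT } FuncType;
typedef struct { int32_t bytes; int32_t type; } DataType;
enum { TYPE_BIGINT, TYPE_BINARY };

static DataType resolve_result_type(FuncType func, const DataType* pParam) {
  switch (func) {
    case FUNC_COUNT:
      /* COUNT always yields a signed 64-bit counter, as in the hunk above. */
      return (DataType){ .bytes = sizeof(int64_t), .type = TYPE_BIGINT };
    case FUNC_SUM:
      /* SUM keeps the parameter's width here; the real promotion rules sit in the elided lines. */
      return (DataType){ .bytes = pParam->bytes, .type = pParam->type };
    case FUNC_CONCAT:
      /* Placeholder, mirroring the "todo" branch added by this commit. */
      return (DataType){ .bytes = pParam->bytes, .type = TYPE_BINARY };
  }
  assert(0); /* unknown function type: fail fast, like ASSERT(0) in the stub */
  return (DataType){ 0 };
}

The trailing assert keeps the fail-fast behaviour of the stub's default branch for any function type that has not been wired up yet.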
@@ -271,6 +271,13 @@ static SNode* slotDescCopy(const SSlotDescNode* pSrc, SSlotDescNode* pDst) {
return (SNode*)pDst;
}
static SNode* downstreamSourceCopy(const SDownstreamSourceNode* pSrc, SDownstreamSourceNode* pDst) {
COPY_SCALAR_FIELD(addr);
COPY_SCALAR_FIELD(taskId);
COPY_SCALAR_FIELD(schedId);
return (SNode*)pDst;
}
SNodeptr nodesCloneNode(const SNodeptr pNode) {
if (NULL == pNode) {
return NULL;
@@ -306,6 +313,8 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) {
return dataBlockDescCopy((const SDataBlockDescNode*)pNode, (SDataBlockDescNode*)pDst);
case QUERY_NODE_SLOT_DESC:
return slotDescCopy((const SSlotDescNode*)pNode, (SSlotDescNode*)pDst);
case QUERY_NODE_DOWNSTREAM_SOURCE:
return downstreamSourceCopy((const SDownstreamSourceNode*)pNode, (SDownstreamSourceNode*)pDst);
case QUERY_NODE_LOGIC_PLAN_SCAN:
return logicScanCopy((const SScanLogicNode*)pNode, (SScanLogicNode*)pDst);
case QUERY_NODE_LOGIC_PLAN_AGG:
......
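The new downstreamSourceCopy leans on the file's COPY_SCALAR_FIELD helper. As a hedged sketch (the real macro in nodesCloneFuncs.c may differ in detail), it presumably reduces to a plain member assignment from pSrc to pDst, which is why only scalar fields are listed. The struct below is a stand-in, not the real SDownstreamSourceNode layout.

#include <stdint.h>

/* Illustrative macro: copy one scalar member from the source node to the copy.
 * This is an assumption about COPY_SCALAR_FIELD's shape, not its real definition. */
#define COPY_SCALAR_FIELD(fldname) (pDst->fldname = pSrc->fldname)

typedef struct DemoDownstreamSource {
  uint64_t addr;    /* the real node carries an endpoint/address whose exact type is not shown here */
  uint64_t taskId;
  uint64_t schedId;
} DemoDownstreamSource;

static DemoDownstreamSource* demoDownstreamSourceCopy(const DemoDownstreamSource* pSrc,
                                                      DemoDownstreamSource* pDst) {
  COPY_SCALAR_FIELD(addr);
  COPY_SCALAR_FIELD(taskId);
  COPY_SCALAR_FIELD(schedId);
  return pDst;
}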
@@ -74,6 +74,8 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SSlotDescNode));
case QUERY_NODE_COLUMN_DEF:
return makeNode(type, sizeof(SColumnDefNode));
case QUERY_NODE_DOWNSTREAM_SOURCE:
return makeNode(type, sizeof(SDownstreamSourceNode));
case QUERY_NODE_SET_OPERATOR:
return makeNode(type, sizeof(SSetOperator));
case QUERY_NODE_SELECT_STMT:
......
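Both registration points follow one convention: nodesMakeNode needs the concrete struct size for a node type, and nodesCloneNode needs the matching copy routine. A compact sketch of the allocation side of that pairing, with illustrative names only (demo_make_node is not the TDengine function):

#include <stdint.h>
#include <stdlib.h>

typedef enum { DEMO_NODE_SLOT_DESC, DEMO_NODE_DOWNSTREAM_SOURCE } DemoNodeType;

typedef struct { DemoNodeType type; } DemoNodeHeader;            /* embedded "SNode" header */
typedef struct { DemoNodeHeader node; int16_t  slotId; } DemoSlotDesc;
typedef struct { DemoNodeHeader node; uint64_t taskId; } DemoDownstreamSource;

/* Allocate the concrete struct for a node type and stamp the type tag. */
static void* demo_make_node(DemoNodeType type, size_t size) {
  DemoNodeHeader* p = calloc(1, size);   /* zero-init so optional fields start clean */
  if (p != NULL) {
    p->type = type;
  }
  return p;
}

/* A new node kind only needs one sizeof() entry in the dispatch switch. */
static void* demo_make(DemoNodeType type) {
  switch (type) {
    case DEMO_NODE_SLOT_DESC:         return demo_make_node(type, sizeof(DemoSlotDesc));
    case DEMO_NODE_DOWNSTREAM_SOURCE: return demo_make_node(type, sizeof(DemoDownstreamSource));
  }
  return NULL;
}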
@@ -891,7 +891,7 @@ static int32_t translateDropTable(STranslateContext* pCxt, SDropTableStmt* pStmt
if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
code = doTranslateDropSuperTable(pCxt, &tableName, pClause->ignoreNotExists);
} else {
// todo;
// todo : drop normal table or child table
code = TSDB_CODE_FAILED;
}
}
@@ -1203,6 +1203,9 @@ static int32_t setReslutSchema(STranslateContext* pCxt, SQuery* pQuery) {
}
static void destroyTranslateContext(STranslateContext* pCxt) {
if (NULL != pCxt->pNsLevel) {
}
taosArrayDestroy(pCxt->pNsLevel);
if (NULL != pCxt->pCmdMsg) {
tfree(pCxt->pCmdMsg->pMsg);
@@ -1222,6 +1225,11 @@ static void toSchema(const SColumnDefNode* pCol, int32_t colId, SSchema* pSchema
strcpy(pSchema->name, pCol->colName);
}
static void destroyCreateTbReq(SVCreateTbReq* pReq) {
tfree(pReq->name);
tfree(pReq->ntbCfg.pSchema);
}
static int32_t buildNormalTableBatchReq(
const char* pTableName, const SNodeList* pColumns, const SVgroupInfo* pVgroupInfo, SVgroupTablesBatch* pBatch) {
SVCreateTbReq req = {0};
@@ -1230,6 +1238,7 @@ static int32_t buildNormalTableBatchReq(
req.ntbCfg.nCols = LIST_LENGTH(pColumns);
req.ntbCfg.pSchema = calloc(req.ntbCfg.nCols, sizeof(SSchema));
if (NULL == req.name || NULL == req.ntbCfg.pSchema) {
destroyCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
SNode* pCol;
@@ -1242,6 +1251,7 @@ static int32_t buildNormalTableBatchReq(
pBatch->info = *pVgroupInfo;
pBatch->req.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
if (NULL == pBatch->req.pArray) {
destroyCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
taosArrayPush(pBatch->req.pArray, &req);
@@ -1311,18 +1321,20 @@ static int32_t rewriteToVnodeModifOpStmt(SQuery* pQuery, SArray* pBufArray) {
}
static int32_t buildCreateTableDataBlock(const SCreateTableStmt* pStmt, const SVgroupInfo* pInfo, SArray** pBufArray) {
*pBufArray = taosArrayInit(1, POINTER_BYTES);
if (NULL == *pBufArray) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SVgroupTablesBatch tbatch = {0};
int32_t code = buildNormalTableBatchReq(pStmt->tableName, pStmt->pCols, pInfo, &tbatch);
if (TSDB_CODE_SUCCESS == code) {
*pBufArray = taosArrayInit(1, POINTER_BYTES);
if (NULL == *pBufArray) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
}
if (TSDB_CODE_SUCCESS == code) {
code = serializeVgroupTablesBatch(&tbatch, *pBufArray);
}
destroyCreateTbReqBatch(&tbatch);
if (TSDB_CODE_SUCCESS != code) {
// todo : destroyCreateTbReqArray(*pBufArray);
}
return code;
}
@@ -1331,7 +1343,7 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
SVgroupInfo info = {0};
int32_t code = getTableHashVgroup(pCxt->pParseCxt, pStmt->dbName, pStmt->tableName, &info);
SArray* pBufArray;
SArray* pBufArray = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = buildCreateTableDataBlock(pStmt, &info, &pBufArray);
}
......
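Taken together, the parTranslater.c hunks tighten error handling around table creation: free the half-built SVCreateTbReq when an allocation fails, create the output array only after the batch request is built, and initialize pBufArray so a failed path never leaves it dangling. A sketch of that single-code-variable style, with illustrative demo_* names and simplified allocations:

#include <stdlib.h>

enum { DEMO_OK = 0, DEMO_OOM = -1 };

typedef struct { char* name; void* schema; } DemoCreateTbReq;

/* Release whatever the request owns; safe to call on a partially built request. */
static void demo_destroy_req(DemoCreateTbReq* req) {
  free(req->name);   req->name = NULL;
  free(req->schema); req->schema = NULL;
}

static int demo_build_req(DemoCreateTbReq* req) {
  req->name   = malloc(32);
  req->schema = calloc(4, 16);
  if (req->name == NULL || req->schema == NULL) {
    demo_destroy_req(req);            /* mirrors destroyCreateTbReq(&req) in the diff */
    return DEMO_OOM;
  }
  return DEMO_OK;
}

/* Build the output buffer only after the request itself succeeded,
 * and make sure the caller never sees a dangling pointer on failure. */
static int demo_build_data_block(void** pOut) {
  *pOut = NULL;
  DemoCreateTbReq req = {0};
  int code = demo_build_req(&req);
  if (DEMO_OK == code) {
    *pOut = malloc(64);               /* stands in for taosArrayInit on the output array */
    if (NULL == *pOut) {
      code = DEMO_OOM;
    }
  }
  demo_destroy_req(&req);             /* the request is no longer needed either way */
  if (DEMO_OK != code) {
    free(*pOut);
    *pOut = NULL;
  }
  return code;
}

The same idea explains the SArray* pBufArray = NULL change in rewriteCreateTable: the out-parameter is only meaningful once every step has reported success.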
@@ -48,7 +48,7 @@ static SNode* createSlotDesc(SPhysiPlanContext* pCxt, const SNode* pNode, int16_
pSlot->slotId = slotId;
pSlot->dataType = ((SExprNode*)pNode)->resType;
pSlot->reserve = false;
pSlot->output = false;
pSlot->output = true;
return (SNode*)pSlot;
}
......