慢慢CG / TDengine (forked from taosdata / TDengine)

Commit 7021d3f6
Authored on Jan 15, 2020 by hjxilinx
Parent: 9e5ddfe4

refactor codes

Showing 3 changed files with 53 additions and 10 deletions:
  src/client/src/tscJoinProcess.c          +8  -6
  src/system/detail/inc/vnode.h            +9  -0
  src/system/detail/src/vnodeQueryImpl.c   +36 -4
src/client/src/tscJoinProcess.c

@@ -315,9 +315,9 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
   tscFieldInfoCopyAll(&pQueryInfo->fieldsInfo, &pSupporter->fieldsInfo);
 
   /*
-   * if the first column of the secondary query is not ts function, add this function.
+   * if the first column of the secondary query is not ts function, add this function.
    * Because this column is required to filter with timestamp after intersecting.
-   */
+   */
   if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS) {
     tscAddTimestampColumn(pQueryInfo, TSDB_FUNC_TS, 0);
   }
@@ -349,8 +349,7 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
   tscPrintSelectClause(pNew, 0);
 
-  tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, transfer to ts_comp query to retrieve timestamps, "
-           "exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s",
+  tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s",
            pSql, pNew, 0, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type, pNewQueryInfo->exprsInfo.numOfExprs,
            pNewQueryInfo->colList.numOfCols, pNewQueryInfo->fieldsInfo.numOfOutputCols,
            pNewQueryInfo->pMeterInfo[0]->name);
@@ -391,7 +390,10 @@ static void doQuitSubquery(SSqlObj* pParentSql) {
 }
 
 static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter) {
-  if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
+  int32_t numOfTotal = pSupporter->pState->numOfCompleted;
+  int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
+
+  if (finished >= numOfTotal) {
     pSqlObj->res.code = abs(pSupporter->pState->code);
     tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code);
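The quitAllSubquery hunk above replaces the inlined increment-and-compare with named locals around atomic_add_fetch_32. Below is a minimal, self-contained sketch of the same "last finisher cleans up" idea behind that counter; SSubqueryState, onSubqueryDone, and GCC's __atomic_add_fetch (standing in for TDengine's atomic_add_fetch_32 macro) are illustrative assumptions, not the project's actual types or API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct SSubqueryState {
  int32_t numOfTotal;      // number of launched subqueries (illustrative)
  int32_t numOfCompleted;  // incremented once per finished subquery
} SSubqueryState;

static SSubqueryState state = {.numOfTotal = 4, .numOfCompleted = 0};

/* Each subquery's completion callback counts itself as finished; only the
 * caller that observes finished >= numOfTotal performs the final cleanup. */
static void onSubqueryDone(int32_t idx) {
  int32_t finished = __atomic_add_fetch(&state.numOfCompleted, 1, __ATOMIC_SEQ_CST);
  if (finished >= state.numOfTotal) {
    printf("subquery %d is the last to finish; releasing shared state\n", idx);
  } else {
    printf("subquery %d done (%d/%d)\n", idx, finished, state.numOfTotal);
  }
}

static void *worker(void *arg) {
  onSubqueryDone((int32_t)(intptr_t)arg);
  return NULL;
}

int main(void) {
  pthread_t th[4];
  for (intptr_t i = 0; i < 4; ++i) pthread_create(&th[i], NULL, worker, (void *)i);
  for (int i = 0; i < 4; ++i) pthread_join(th[i], NULL);
  return 0;
}

Compile with -pthread; in the commit itself the counter lives in the shared pState of SJoinSubquerySupporter.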
@@ -897,7 +899,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
   pTSBuf->f = fopen(pTSBuf->path, "r+");
   if (pTSBuf->f == NULL) {
-    free(pTSBuf);
+    free(pTSBuf);
     return NULL;
   }
src/system/detail/inc/vnode.h

@@ -239,10 +239,19 @@ typedef struct SQuery {
   int         lfd;              // only for query in file, last file handle
   SCompBlock *pBlock;           // only for query in file
   SField    **pFields;
   int         numOfBlocks;      // only for query in file
   int         blockBufferSize;  // length of pBlock buffer
   int         currentSlot;
   int         firstSlot;
+
+  /*
+   * the two parameters are utilized to handle the data missing situation, caused by import operation.
+   * When the commit slot is the first slot, and commitPoints != 0
+   */
+  int32_t     commitSlot;   // which slot is committed,
+  int32_t     commitPoint;  // starting point for next commit
+
   int         slot;
   int         pos;
   TSKEY       key;
src/system/detail/src/vnodeQueryImpl.c

@@ -1146,6 +1146,32 @@ SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv* pRuntimeE
   // keep the structure as well as the block data into local buffer
   memcpy(&pRuntimeEnv->cacheBlock, pBlock, sizeof(SCacheBlock));
 
+  // the commit data points will be ignored
+  int32_t offset = 0;
+  int32_t numOfPoints = pBlock->numOfPoints;
+
+  if (pQuery->firstSlot == pQuery->commitSlot) {
+    assert(pQuery->commitPoint >= 0 && pQuery->commitPoint <= pBlock->numOfPoints);
+
+    offset = pQuery->commitPoint;
+    numOfPoints = pBlock->numOfPoints - offset;
+
+    if (offset != 0) {
+      dTrace("%p ignore the data in cache block that are commit already, numOfblock:%d slot:%d ignore points:%d. "
+             "first:%d last:%d",
+             GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->slot, pQuery->commitPoint, pQuery->firstSlot,
+             pQuery->currentSlot);
+    }
+
+    pBlock->numOfPoints = numOfPoints;
+
+    // current block are all commit already, ignore it
+    if (pBlock->numOfPoints == 0) {
+      dTrace("%p ignore current in cache block that are all commit already, numOfblock:%d slot:%d"
+             "first:%d last:%d",
+             GET_QINFO_ADDR(pQuery), pQuery->numOfBlocks, pQuery->slot, pQuery->firstSlot, pQuery->currentSlot);
+      return NULL;
+    }
+  }
+
   // keep the data from in cache into the temporarily allocated buffer
   for (int32_t i = 0; i < pQuery->numOfCols; ++i) {
     SColumnInfoEx *pColumnInfoEx = &pQuery->colList[i];
@@ -1164,9 +1190,9 @@ SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv* pRuntimeE
       assert(pCol->colId == pQuery->colList[i].data.colId && bytes == pColumnInfoEx->data.bytes &&
              type == pColumnInfoEx->data.type);
 
-      memcpy(dst, pBlock->offset[columnIndex], pBlock->numOfPoints * bytes);
+      memcpy(dst, pBlock->offset[columnIndex] + offset * bytes, numOfPoints * bytes);
     } else {
-      setNullN(dst, type, bytes, pBlock->numOfPoints);
+      setNullN(dst, type, bytes, numOfPoints);
     }
   }
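The two getCacheDataBlock hunks above skip points that were already committed to disk: offset starts at pQuery->commitPoint, numOfPoints shrinks by the same amount, and the per-column memcpy then begins at offset * bytes. The sketch below illustrates only that slicing arithmetic on a plain int column; copyUncommitted and its parameters are hypothetical names for illustration, not the engine's real buffer layout or API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy only the uncommitted tail of a cache-block column into dst.
 * 'src' holds numOfPointsTotal values of 'bytes' each; the first
 * 'commitPoint' values are already on disk and are skipped. */
static int32_t copyUncommitted(char *dst, const char *src, int32_t numOfPointsTotal,
                               int32_t commitPoint, int32_t bytes) {
  assert(commitPoint >= 0 && commitPoint <= numOfPointsTotal);

  int32_t offset = commitPoint;                     // first uncommitted row
  int32_t numOfPoints = numOfPointsTotal - offset;  // rows still only in cache

  memcpy(dst, src + (size_t)offset * bytes, (size_t)numOfPoints * bytes);
  return numOfPoints;
}

int main(void) {
  int32_t col[6] = {10, 20, 30, 40, 50, 60};  // a 6-row int column in cache
  int32_t out[6] = {0};

  // Pretend the first 4 rows were already committed by an import/commit pass.
  int32_t n = copyUncommitted((char *)out, (const char *)col, 6, 4, sizeof(int32_t));

  for (int32_t i = 0; i < n; ++i) printf("%d ", out[i]);  // prints: 50 60
  printf("\n");
  return 0;
}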
@@ -2500,18 +2526,24 @@ void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t v
   // commitSlot here denotes the first uncommitted block in cache
   int32_t numOfBlocks = 0;
   int32_t lastSlot = 0;
+  int32_t commitSlot = 0;
+  int32_t commitPoint = 0;
 
   SCachePool *pPool = (SCachePool *)vnodeList[vid].pCachePool;
   pthread_mutex_lock(&pPool->vmutex);
 
   numOfBlocks = pCacheInfo->numOfBlocks;
   lastSlot = pCacheInfo->currentSlot;
+  commitSlot = pCacheInfo->commitSlot;
+  commitPoint = pCacheInfo->commitPoint;
   pthread_mutex_unlock(&pPool->vmutex);
 
   // make sure it is there, otherwise, return right away
   pQuery->currentSlot = lastSlot;
   pQuery->numOfBlocks = numOfBlocks;
   pQuery->firstSlot = getFirstCacheSlot(numOfBlocks, lastSlot, pCacheInfo);
+  pQuery->commitSlot = commitSlot;
+  pQuery->commitPoint = commitPoint;
 
   /*
    * Note: the block id is continuous increasing, never becomes smaller.
    *
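In the getBasicCacheInfoSnapshot hunk above, commitSlot and commitPoint are now read together with numOfBlocks and currentSlot while the cache pool's vmutex is held, so the query starts from one consistent view of the writer's bookkeeping before the lock is released. A minimal sketch of that snapshot-under-lock pattern follows, assuming a simplified SCacheInfoDemo struct and a single global mutex (both illustrative, not TDengine's real layout):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Writer-side bookkeeping that is only mutated while the lock is held. */
typedef struct SCacheInfoDemo {
  int32_t numOfBlocks;
  int32_t currentSlot;
  int32_t commitSlot;
  int32_t commitPoint;
} SCacheInfoDemo;

static SCacheInfoDemo cacheInfo = {8, 5, 3, 120};
static pthread_mutex_t vmutex = PTHREAD_MUTEX_INITIALIZER;

/* Copy every field we need in one critical section, then work on the
 * local snapshot without holding the lock. */
static void takeSnapshot(SCacheInfoDemo *snap) {
  pthread_mutex_lock(&vmutex);
  snap->numOfBlocks = cacheInfo.numOfBlocks;
  snap->currentSlot = cacheInfo.currentSlot;
  snap->commitSlot  = cacheInfo.commitSlot;
  snap->commitPoint = cacheInfo.commitPoint;
  pthread_mutex_unlock(&vmutex);
}

int main(void) {
  SCacheInfoDemo snap;
  takeSnapshot(&snap);
  printf("blocks:%d lastSlot:%d commitSlot:%d commitPoint:%d\n",
         snap.numOfBlocks, snap.currentSlot, snap.commitSlot, snap.commitPoint);
  return 0;
}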
@@ -4437,7 +4469,7 @@ static void doHandleDataBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pbl
     pSummary->fileTimeUs += (taosGetTimestampUs() - start);
   } else {
-    assert(vnodeIsDatablockLoaded(pRuntimeEnv, pRuntimeEnv->pMeterObj, -1, true));
+    assert(vnodeIsDatablockLoaded(pRuntimeEnv, pRuntimeEnv->pMeterObj, -1, true) == DISK_BLOCK_NO_NEED_TO_LOAD);
 
     SCacheBlock *pBlock = getCacheDataBlock(pRuntimeEnv->pMeterObj, pRuntimeEnv, pQuery->slot);
 
     *pblockInfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_CACHE_BLOCK);
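The last hunk tightens the assertion from a plain truthiness check to an explicit comparison with DISK_BLOCK_NO_NEED_TO_LOAD, which matters if vnodeIsDatablockLoaded can return more than one non-zero status. The sketch below only illustrates that distinction; the enum members other than DISK_BLOCK_NO_NEED_TO_LOAD, their values, and loadStatus are made-up placeholders.

#include <assert.h>
#include <stdio.h>

/* Hypothetical status enum: only DISK_BLOCK_NO_NEED_TO_LOAD appears in the
 * diff above; the other members are illustrative placeholders. */
typedef enum {
  DISK_BLOCK_NO_NEED_TO_LOAD = 1,
  DISK_BLOCK_LOAD_TS         = 2,
  DISK_BLOCK_LOAD_BLOCK      = 3,
} EBlockLoadStatus;

static EBlockLoadStatus loadStatus(int alreadyLoaded) {
  return alreadyLoaded ? DISK_BLOCK_NO_NEED_TO_LOAD : DISK_BLOCK_LOAD_BLOCK;
}

int main(void) {
  EBlockLoadStatus s = loadStatus(0);

  // A plain truthiness assert passes for every non-zero status ...
  assert(s);

  // ... while comparing against the expected member pins down the exact state
  // the caller relies on (here the block would still have to be loaded).
  if (s != DISK_BLOCK_NO_NEED_TO_LOAD) {
    printf("status %d: block still has to be loaded\n", (int)s);
  }
  return 0;
}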