Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
taosdata
TDengine
提交
a27fcc52
TDengine
项目概览
taosdata
/
TDengine
1 年多 前同步成功
通知
1185
Star
22016
Fork
4786
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
TDengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
a27fcc52
编写于
11月 29, 2019
作者:
H
Hongze Cheng
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
refactor part of code
上级
56bb3121
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
44 additions
and
57 deletions
+44
-57
src/system/detail/src/vnodeImport.c
src/system/detail/src/vnodeImport.c
+44
-57
未找到文件。
src/system/detail/src/vnodeImport.c
浏览文件 @
a27fcc52
...
...
@@ -1257,6 +1257,37 @@ int isCacheEnd(SBlockIter iter, SMeterObj *pMeter) {
return
((
iter
.
slot
==
slot
)
&&
(
iter
.
pos
==
pos
));
}
/*
 * Drain rows held in the merge buffer (pBuffer->spos .. pBuffer->epos, a ring
 * of pBuffer->totalRows entries) into the meter's cache blocks, advancing the
 * write iterator as blocks fill up.
 *
 * pBuffer     - ring buffer of merged rows to flush
 * pWriteIter  - write position (slot/pos) in the cache block ring
 * pCacheIter  - current cache read position; acts as a bound when checkBound != 0
 * pObj        - meter object (column schema, rows per block)
 * pInfo       - cache info (cache block ring of pInfo->maxBlocks slots)
 * checkBound  - nonzero: stop before overtaking pCacheIter; zero: flush
 *               everything and record the final partial block's row count
 */
static void vnodeFlushMergeBuffer(SMergeBuffer *pBuffer, SBlockIter *pWriteIter, SBlockIter *pCacheIter,
                                  SMeterObj *pObj, SCacheInfo *pInfo, int checkBound) {
  /* Write position sits one past the end of a full block: step to the next slot first. */
  if (pWriteIter->pos == pObj->pointsPerBlock) {
    pWriteIter->pos  = 0;
    pWriteIter->slot = (pWriteIter->slot + 1) % pInfo->maxBlocks;
  }

  for (; pBuffer->spos != pBuffer->epos; pBuffer->spos = (pBuffer->spos + 1) % pBuffer->totalRows) {
    /* When bounded, never write over the position the cache iterator still has to read. */
    if (checkBound && pWriteIter->slot == pCacheIter->slot && pWriteIter->pos == pCacheIter->pos) {
      break;
    }

    /* Copy one row, column by column, from the merge buffer into the cache block. */
    for (int c = 0; c < pObj->numOfColumns; c++) {
      memcpy(pInfo->cacheBlocks[pWriteIter->slot]->offset[c] + pObj->schema[c].bytes * pWriteIter->pos,
             pBuffer->offset[c] + pObj->schema[c].bytes * pBuffer->spos,
             pObj->schema[c].bytes);
    }

    if (pWriteIter->pos + 1 >= pObj->pointsPerBlock) {
      /* Block is now full: record its row count and move on to the next slot. */
      pInfo->cacheBlocks[pWriteIter->slot]->numOfPoints = pWriteIter->pos + 1;
      pWriteIter->slot = (pWriteIter->slot + 1) % pInfo->maxBlocks;
      pWriteIter->pos  = 0;
    } else {
      pWriteIter->pos++;
    }
  }

  /* Final (unbounded) flush: commit the row count of the trailing partial block. */
  if (!checkBound && pWriteIter->pos != 0) {
    pInfo->cacheBlocks[pWriteIter->slot]->numOfPoints = pWriteIter->pos;
  }
}
int
vnodeImportDataToCache
(
SImportInfo
*
pImport
,
const
char
*
payload
,
const
int
rows
)
{
SMeterObj
*
pObj
=
pImport
->
pObj
;
SVnodeObj
*
pVnode
=
vnodeList
+
pObj
->
vnode
;
...
...
@@ -1353,35 +1384,13 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int
if
((
payloadIter
>=
rows
)
&&
isCacheIterEnd
)
break
;
if
((
pBuffer
->
epos
+
1
)
%
pBuffer
->
totalRows
==
pBuffer
->
spos
)
{
// merge buffer is full, flush
if
(
writeIter
.
pos
==
pObj
->
pointsPerBlock
)
{
writeIter
.
pos
=
0
;
writeIter
.
slot
=
(
writeIter
.
slot
+
1
)
%
pInfo
->
maxBlocks
;
}
while
(
pBuffer
->
spos
!=
pBuffer
->
epos
)
{
if
(
writeIter
.
slot
==
cacheIter
.
slot
&&
writeIter
.
pos
==
cacheIter
.
pos
)
break
;
for
(
int
col
=
0
;
col
<
pObj
->
numOfColumns
;
col
++
)
{
memcpy
(
pInfo
->
cacheBlocks
[
writeIter
.
slot
]
->
offset
[
col
]
+
pObj
->
schema
[
col
].
bytes
*
writeIter
.
pos
,
pBuffer
->
offset
[
col
]
+
pObj
->
schema
[
col
].
bytes
*
pBuffer
->
spos
,
pObj
->
schema
[
col
].
bytes
);
}
if
(
writeIter
.
pos
+
1
<
pObj
->
pointsPerBlock
)
{
writeIter
.
pos
++
;
}
else
{
pInfo
->
cacheBlocks
[
writeIter
.
slot
]
->
numOfPoints
=
writeIter
.
pos
+
1
;
writeIter
.
slot
=
(
writeIter
.
slot
+
1
)
%
pInfo
->
maxBlocks
;
writeIter
.
pos
=
0
;
}
pBuffer
->
spos
=
(
pBuffer
->
spos
+
1
)
%
pBuffer
->
totalRows
;
}
vnodeFlushMergeBuffer
(
pBuffer
,
&
writeIter
,
&
cacheIter
,
pObj
,
pInfo
,
1
);
}
if
((
payloadIter
>=
rows
)
||
((
!
isCacheIterEnd
)
&&
(
KEY_AT_INDEX
(
payload
,
pObj
->
bytesPerPoint
,
payloadIter
)
>
KEY_AT_INDEX
(
pInfo
->
cacheBlocks
[
cacheIter
.
slot
]
->
offset
[
0
],
sizeof
(
TSKEY
),
cacheIter
.
pos
))))
{
// if (payload end || (cacheIter not end && payloadKey > blockKey)), consume cache
TSKEY
payloadKey
=
(
payloadIter
<
rows
)
?
KEY_AT_INDEX
(
payload
,
pObj
->
bytesPerPoint
,
payloadIter
)
:
INT64_MAX
;
TSKEY
cacheKey
=
(
isCacheIterEnd
)
?
INT64_MAX
:
KEY_AT_INDEX
(
pInfo
->
cacheBlocks
[
cacheIter
.
slot
]
->
offset
[
0
],
sizeof
(
TSKEY
),
cacheIter
.
pos
);
if
(
cacheKey
<
payloadKey
)
{
// if (payload end || (cacheIter not end && payloadKey > blockKey)), consume cache
for
(
int
col
=
0
;
col
<
pObj
->
numOfColumns
;
col
++
)
{
memcpy
(
pBuffer
->
offset
[
col
]
+
pObj
->
schema
[
col
].
bytes
*
pBuffer
->
epos
,
pInfo
->
cacheBlocks
[
cacheIter
.
slot
]
->
offset
[
col
]
+
pObj
->
schema
[
col
].
bytes
*
cacheIter
.
pos
,
...
...
@@ -1389,11 +1398,7 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int
}
FORWARD_ITER
(
cacheIter
,
1
,
pInfo
->
maxBlocks
,
pObj
->
pointsPerBlock
);
isCacheIterEnd
=
isCacheEnd
(
cacheIter
,
pObj
);
}
else
if
((
isCacheIterEnd
)
||
((
payloadIter
<
rows
)
&&
(
KEY_AT_INDEX
(
payload
,
pObj
->
bytesPerPoint
,
payloadIter
)
<
KEY_AT_INDEX
(
pInfo
->
cacheBlocks
[
cacheIter
.
slot
]
->
offset
[
0
],
sizeof
(
TSKEY
),
cacheIter
.
pos
))))
{
// cacheIter end || (payloadIter not end && payloadKey < blockKey), consume payload
}
else
if
(
cacheKey
>
payloadKey
)
{
// cacheIter end || (payloadIter not end && payloadKey < blockKey), consume payload
if
(
availPoints
==
0
)
{
// Need to allocate a new cache block
pthread_mutex_lock
(
&
(
pPool
->
vmutex
));
// TODO: Need to check if there are enough slots to hold a new one
...
...
@@ -1482,29 +1487,11 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int
pBuffer
->
epos
=
(
pBuffer
->
epos
+
1
)
%
pBuffer
->
totalRows
;
}
if
(
pBuffer
->
spos
!=
pBuffer
->
epos
)
{
if
(
writeIter
.
pos
==
pObj
->
pointsPerBlock
)
{
writeIter
.
pos
=
0
;
writeIter
.
slot
=
(
writeIter
.
slot
+
1
)
%
pInfo
->
maxBlocks
;
}
while
(
pBuffer
->
spos
!=
pBuffer
->
epos
)
{
for
(
int
col
=
0
;
col
<
pObj
->
numOfColumns
;
col
++
)
{
memcpy
(
pInfo
->
cacheBlocks
[
writeIter
.
slot
]
->
offset
[
col
]
+
pObj
->
schema
[
col
].
bytes
*
writeIter
.
pos
,
pBuffer
->
offset
[
col
]
+
pObj
->
schema
[
col
].
bytes
*
pBuffer
->
spos
,
pObj
->
schema
[
col
].
bytes
);
}
if
(
writeIter
.
pos
+
1
<
pObj
->
pointsPerBlock
)
{
writeIter
.
pos
++
;
}
else
{
pInfo
->
cacheBlocks
[
writeIter
.
slot
]
->
numOfPoints
=
writeIter
.
pos
+
1
;
writeIter
.
slot
=
(
writeIter
.
slot
+
1
)
%
pInfo
->
maxBlocks
;
writeIter
.
pos
=
0
;
}
pBuffer
->
spos
=
(
pBuffer
->
spos
+
1
)
%
pBuffer
->
totalRows
;
}
if
(
writeIter
.
pos
!=
0
)
pInfo
->
cacheBlocks
[
writeIter
.
slot
]
->
numOfPoints
=
writeIter
.
pos
;
if
(
pBuffer
->
spos
!=
pBuffer
->
epos
)
{
// Flush the remaining data in the merge buffer
vnodeFlushMergeBuffer
(
pBuffer
,
&
writeIter
,
&
cacheIter
,
pObj
,
pInfo
,
0
);
}
else
{
// Should never come here
assert
(
false
);
}
if
(
isAppendData
)
{
...
...
@@ -1514,9 +1501,9 @@ int vnodeImportDataToCache(SImportInfo *pImport, const char *payload, const int
}
}
pImport
->
importedRows
+=
rowsImported
;
__sync_fetch_and_sub
(
&
(
pObj
->
freePoints
),
rowsImported
);
atomic_fetch_sub_32
(
&
(
pObj
->
freePoints
),
rowsImported
);
code
=
0
;
code
=
TSDB_CODE_SUCCESS
;
_exit:
tfree
(
pBuffer
);
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录