magicwindyyd / mindspore (fork of MindSpore / mindspore)
Commit 4094c468
Authored Jul 07, 2020 by kswang
Parent: faa1084b

use multi thread for reduce sparse gradient

Showing 2 changed files with 57 additions and 26 deletions (+57 -26):

mindspore/ccsrc/kernel/common_utils.cc  +53 -25
mindspore/ccsrc/kernel/common_utils.h   +4 -1
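The patch moves the thread fan-out into a dedicated RunMultiThreadReduceSparseGradient helper, raises the thread cap from 8 to 24 for the top-level reduction, and adds a use_multi_threads switch so nested calls can stay single-threaded. Below is a standalone sketch of the ceiling-division partitioning the helper uses; the sizes are made up for illustration and are not from the commit.

// Standalone sketch of the ceiling-division work partitioning (hypothetical sizes).
#include <cstddef>
#include <iostream>

int main() {
  size_t n = 100;          // number of unique index slices (hypothetical)
  size_t thread_num = 24;  // cap used by RunMultiThreadReduceSparseGradient
  if (n < thread_num) {
    thread_num = n;
  }
  size_t stride = (n + thread_num - 1) / thread_num;  // ceil(n / thread_num)
  thread_num = (n + stride - 1) / stride;             // drop threads left with no work
  for (size_t i = 0; i < thread_num; ++i) {
    size_t slice_start = i * stride;
    size_t slice_end = (i == thread_num - 1) ? n : slice_start + stride;
    std::cout << "thread " << i << " handles slices [" << slice_start << ", " << slice_end << ")\n";
  }
  return 0;
}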
mindspore/ccsrc/kernel/common_utils.cc

@@ -579,8 +579,40 @@ void WorkerForReduceSparseGradient(WorkerParamsForReduceSparseGradient param) {
   }
 }
 
+void RunMultiThreadReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad,
+                                        size_t outer_dim, std::vector<std::pair<int, size_t>> *sorted_indices,
+                                        std::vector<size_t> *slice_positions) {
+  MS_LOG(DEBUG) << "Start";
+  size_t thread_num = 24;
+  if (slice_positions->size() < thread_num) {
+    thread_num = slice_positions->size();
+  }
+  size_t stride = (slice_positions->size() + thread_num - 1) / thread_num;
+  thread_num = (slice_positions->size() + stride - 1) / stride;
+  std::vector<std::thread> threads;
+  size_t max_length = sorted_indices->size() * outer_dim;
+  for (size_t i = 0; i < thread_num; ++i) {
+    size_t slice_start = i * stride;
+    size_t slice_end = 0;
+    if (i == thread_num - 1) {
+      slice_end = slice_positions->size();
+    } else {
+      slice_end = slice_start + stride;
+    }
+    WorkerParamsForReduceSparseGradient params{slice_start, slice_end, max_length, outer_dim,
+                                               sorted_indices, slice_positions, origin_sparse_grad.value_,
+                                               unique_grad};
+    threads.emplace_back(std::thread(WorkerForReduceSparseGradient, params));
+  }
+  for (size_t i = 0; i < thread_num; ++i) {
+    threads[i].join();
+  }
+  MS_LOG(DEBUG) << "End";
+}
+
 void ReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad, size_t first_dim,
-                          size_t outer_dim) {
+                          size_t outer_dim, bool use_multi_threads) {
   MS_LOG(DEBUG) << "Start";
   MS_EXCEPTION_IF_NULL(origin_sparse_grad.value_);
   MS_EXCEPTION_IF_NULL(origin_sparse_grad.indices_);
   MS_EXCEPTION_IF_NULL(unique_grad);
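For reference, here is a self-contained sketch of the launch-then-join pattern the new helper follows. Worker and Params are hypothetical stand-ins for WorkerForReduceSparseGradient and WorkerParamsForReduceSparseGradient; no MindSpore code is used.

// Sketch of the per-thread fan-out and join (hypothetical Worker/Params stand-ins).
#include <cstddef>
#include <thread>
#include <vector>

struct Params {
  size_t slice_start_;
  size_t slice_end_;
};

void Worker(Params param) {
  // A real worker would reduce the slices in [slice_start_, slice_end_).
  (void)param;
}

int main() {
  const size_t slice_count = 10;
  const size_t stride = 4;
  const size_t thread_num = (slice_count + stride - 1) / stride;  // 3 threads
  std::vector<std::thread> threads;
  for (size_t i = 0; i < thread_num; ++i) {
    size_t slice_start = i * stride;
    size_t slice_end = (i == thread_num - 1) ? slice_count : slice_start + stride;
    // Each thread receives its own copy of the parameter struct, as in the patch.
    threads.emplace_back(Worker, Params{slice_start, slice_end});
  }
  for (auto &t : threads) {
    t.join();  // results are only consumed after every worker finishes
  }
  return 0;
}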
@@ -599,42 +631,35 @@ void ReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradie
                [](const std::pair<int, size_t> &left, const std::pair<int, size_t> &right) { return left.first < right.first; });
   int last_index = 0;
   std::vector<size_t> slice_positions;
+  slice_positions.reserve(sorted_indices.size());
   for (size_t i = 0; i < sorted_indices.size(); ++i) {
     if (i == 0 || last_index != sorted_indices[i].first) {
       slice_positions.emplace_back(i);
     }
     last_index = sorted_indices[i].first;
   }
-  size_t thread_num = 8;
-  if (slice_positions.size() < thread_num) {
-    thread_num = slice_positions.size();
-  }
-  size_t stride = (slice_positions.size() + thread_num - 1) / thread_num;
-  thread_num = (slice_positions.size() + stride - 1) / stride;
-  std::vector<std::thread> threads;
-  size_t max_length = sorted_indices.size() * outer_dim;
-  for (size_t i = 0; i < thread_num; ++i) {
-    size_t slice_start = i * stride;
-    size_t slice_end = 0;
-    if (i == thread_num - 1) {
-      slice_end = slice_positions.size();
-    } else {
-      slice_end = slice_start + stride;
-    }
-    WorkerParamsForReduceSparseGradient params{slice_start, slice_end, max_length, outer_dim,
-                                               &sorted_indices, &slice_positions, origin_sparse_grad.value_,
-                                               unique_grad};
-    threads.emplace_back(std::thread(WorkerForReduceSparseGradient, params));
-  }
-  for (size_t i = 0; i < thread_num; ++i) {
-    threads[i].join();
+  if (use_multi_threads) {
+    RunMultiThreadReduceSparseGradient(origin_sparse_grad, unique_grad, outer_dim, &sorted_indices, &slice_positions);
+  } else {
+    size_t max_length = sorted_indices.size() * outer_dim;
+    WorkerParamsForReduceSparseGradient params{0, slice_positions.size(), max_length, outer_dim,
+                                               &sorted_indices, &slice_positions, origin_sparse_grad.value_,
+                                               unique_grad};
+    WorkerForReduceSparseGradient(params);
   }
   unique_grad->indices_size_ = slice_positions.size();
   MS_LOG(DEBUG) << "End";
 }
 
 void ReduceMultiSparseGradient(const std::vector<std::shared_ptr<SparseGradient>> &unique_slice_grads,
                                SparseGradient *tmp_grad, SparseGradient *unique_grad, size_t first_dim,
                                size_t outer_dim) {
   MS_LOG(DEBUG) << "Start";
   if (unique_slice_grads.empty()) {
     return;
   }
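The slice_positions vector built above marks where each run of duplicate indices begins in the sorted (index, position) pairs; the newly added reserve() merely avoids reallocations while it grows. A small self-contained sketch of the derivation, with hypothetical example values:

// Illustrative derivation of slice_positions from sorted (index, position) pairs.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, size_t>> sorted_indices = {{2, 0}, {5, 1}, {2, 2}, {7, 3}, {5, 4}};
  std::sort(sorted_indices.begin(), sorted_indices.end(),
            [](const std::pair<int, size_t> &left, const std::pair<int, size_t> &right) {
              return left.first < right.first;
            });
  // After sorting by index: (2,0) (2,2) (5,1) (5,4) (7,3).
  std::vector<size_t> slice_positions;
  slice_positions.reserve(sorted_indices.size());  // the reserve added by the patch
  int last_index = 0;
  for (size_t i = 0; i < sorted_indices.size(); ++i) {
    if (i == 0 || last_index != sorted_indices[i].first) {
      slice_positions.emplace_back(i);  // offset where a new unique index starts
    }
    last_index = sorted_indices[i].first;
  }
  for (size_t pos : slice_positions) {
    std::cout << pos << " ";  // prints: 0 2 4
  }
  std::cout << std::endl;
  return 0;
}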
@@ -658,10 +683,12 @@ void ReduceMultiSparseGradient(const std::vector<std::shared_ptr<SparseGradient>
   }
   tmp_grad->indices_size_ = unique_indices_size;
   ReduceSparseGradient(*tmp_grad, unique_grad, first_dim, outer_dim);
   MS_LOG(DEBUG) << "End";
 }
 
 void TwoLevelReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *tmp_grad,
                                   SparseGradient *unique_grad, size_t first_dim, size_t outer_dim) {
   MS_LOG(DEBUG) << "Start";
   MS_EXCEPTION_IF_NULL(origin_sparse_grad.value_);
   MS_EXCEPTION_IF_NULL(origin_sparse_grad.indices_);
   MS_EXCEPTION_IF_NULL(unique_grad);
@@ -693,12 +720,13 @@ void TwoLevelReduceSparseGradient(const SparseGradient &origin_sparse_grad, Spar
     unique_slice_grads[i]->indices_ = unique_grad->indices_ + indices_offset;
     unique_slice_grads[i]->indices_size_ = indices_size;
-    threads.emplace_back(std::thread(ReduceSparseGradient, slice_grad, unique_slice_grads[i].get(), first_dim, outer_dim));
+    threads.emplace_back(
+      std::thread(ReduceSparseGradient, slice_grad, unique_slice_grads[i].get(), first_dim, outer_dim, false));
   }
   for (size_t i = 0; i < thread_num; ++i) {
     threads[i].join();
   }
   ReduceMultiSparseGradient(unique_slice_grads, tmp_grad, unique_grad, first_dim, outer_dim);
   MS_LOG(DEBUG) << "End";
 }
 
 std::pair<AnfNodePtr, size_t> GetKernelInput(const AnfNodePtr &anf_node, size_t index) {
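TwoLevelReduceSparseGradient now passes false for use_multi_threads, so each worker thread reduces its slice in place instead of fanning out again (which would multiply the thread count by up to 24 per worker). A minimal sketch of that call shape, with Reduce as a hypothetical stand-in for ReduceSparseGradient:

// Sketch of single-threaded nested reduction from already-parallel callers.
#include <cstddef>
#include <thread>
#include <vector>

void Reduce(size_t slice_id, bool use_multi_threads) {
  (void)slice_id;
  if (use_multi_threads) {
    // would spawn another pool of worker threads here
  } else {
    // stays on the calling thread, as in TwoLevelReduceSparseGradient's workers
  }
}

int main() {
  std::vector<std::thread> threads;
  for (size_t i = 0; i < 4; ++i) {
    threads.emplace_back(std::thread(Reduce, i, false));  // mirrors the patched call
  }
  for (auto &t : threads) {
    t.join();
  }
  return 0;
}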
mindspore/ccsrc/kernel/common_utils.h

@@ -115,7 +115,7 @@ int Sign(float x);
 void DeduplicateIndexedSlices(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad, size_t first_dim,
                               size_t outer_dim);
 void ReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad, size_t first_dim,
-                          size_t outer_dim);
+                          size_t outer_dim, bool use_multi_threads = true);
 std::pair<AnfNodePtr, size_t> GetKernelInput(const AnfNodePtr &anf_node, size_t index);
 std::vector<std::pair<AnfNodePtr, std::pair<size_t, size_t>>> GetInputIndex(const std::vector<AnfNodePtr> &node_list,
                                                                             const std::vector<AnfNodePtr> &input_list);
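Because the new parameter defaults to true, existing callers of ReduceSparseGradient compile unchanged and keep the multi-threaded path. A tiny illustration of the default-argument pattern; Reduce is a hypothetical stand-in, not the real signature:

// Default-argument compatibility sketch (hypothetical Reduce stand-in).
#include <iostream>

void Reduce(int first_dim, int outer_dim, bool use_multi_threads = true) {
  std::cout << first_dim << "x" << outer_dim << " -> "
            << (use_multi_threads ? "multi-threaded" : "single-threaded") << "\n";
}

int main() {
  Reduce(16, 8);         // pre-existing call site compiles unchanged: multi-threaded
  Reduce(16, 8, false);  // new opt-out used from worker threads
  return 0;
}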
@@ -130,6 +130,9 @@ void GetGraphRealOutput(const FuncGraphPtr &func_graph, std::vector<std::pair<An
 bool IsWeightBoundary(const AnfNodePtr &node);
 void MultiThreadCompute(const MultiThreadComputeFunc &func, MultiThreadComputeParams *params,
                         size_t total_compute_size);
+void RunMultiThreadReduceSparseGradient(const SparseGradient &origin_sparse_grad, SparseGradient *unique_grad,
+                                        size_t outer_dim, std::vector<std::pair<int, size_t>> *sorted_indices,
+                                        std::vector<size_t> *slice_positions);
 void ReduceMultiSparseGradient(const std::vector<std::shared_ptr<SparseGradient>> &unique_slice_grads,
                                SparseGradient *tmp_grad, SparseGradient *unique_grad, size_t first_dim,
                                size_t outer_dim);