Commit fac87022
Authored Dec 13, 2018 by Qiao Longfei

adam support multithread

Parent: e2130502

Showing 4 changed files with 33 additions and 5 deletions (+33, -5)

paddle/fluid/framework/operator.cc            +2  -0
paddle/fluid/framework/operator.h             +3  -0
paddle/fluid/operators/optimizers/adam_op.h   +26 -4
python/paddle/fluid/__init__.py               +2  -1

paddle/fluid/framework/operator.cc

@@ -30,6 +30,8 @@ DECLARE_bool(benchmark);
 DEFINE_bool(check_nan_inf, false,
             "Checking whether operator produce NAN/INF or not. It will be "
             "extremely slow so please use this flag wisely.");
+DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
+DEFINE_int32(min_param_size_to_use_multithread, 0, "");
 
 namespace paddle {
 namespace framework {

paddle/fluid/framework/operator.h

@@ -34,6 +34,9 @@ limitations under the License. */
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/variant.h"
 
+DECLARE_int32(inner_op_parallelism);
+DECLARE_int32(min_param_size_to_use_multithread);
+
 namespace paddle {
 namespace framework {

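For readers unfamiliar with the flag plumbing above: the DEFINE_int32 in operator.cc creates each flag and its backing FLAGS_* variable, while the DECLARE_int32 in operator.h only exposes it to other translation units, such as adam_op.h below. What follows is a minimal, self-contained sketch of that gflags DEFINE/DECLARE pattern; the file name and the main() driver are illustrative, not Paddle code.

// sketch_flags.cc -- a stand-in for the operator.h / operator.cc pair above.
#include <cstdio>
#include <gflags/gflags.h>

// operator.h side: only an extern declaration, so every file that includes
// the header can read FLAGS_inner_op_parallelism.
DECLARE_int32(inner_op_parallelism);

// operator.cc side: the single definition that owns the flag and its default.
DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");

int main(int argc, char* argv[]) {
  // e.g.  ./sketch_flags --inner_op_parallelism=4
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::printf("inner_op_parallelism = %d\n", FLAGS_inner_op_parallelism);
  return 0;
}
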
paddle/fluid/operators/optimizers/adam_op.h

@@ -17,6 +17,7 @@ limitations under the License. */
 #include <Eigen/Dense>
 #include <vector>
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/threadpool.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
 #include "paddle/fluid/operators/math/algorithm.h"
 #include "paddle/fluid/operators/math/selected_rows_functor.h"

@@ -352,10 +353,31 @@ class AdamOpKernel : public framework::OpKernel<T> {
           lr.template data<T>(), grad_data, param.template data<T>(),
           param_out.template mutable_data<T>(ctx.GetPlace()), rows, row_numel,
           grad_merge.rows().size());
-      platform::ForRange<DeviceContext> for_range(
-          static_cast<const DeviceContext&>(ctx.device_context()),
-          param.numel());
-      for_range(functor);
+      int inner_op_parallelism = FLAGS_inner_op_parallelism;
+      if (inner_op_parallelism > 1 &&
+          FLAGS_min_param_size_to_use_multithread > 0 &&
+          param.numel() > FLAGS_min_param_size_to_use_multithread) {
+        std::vector<std::future<void>> fs;
+        int64_t block_size = param.numel() / inner_op_parallelism;
+        for (int i = 0; i < inner_op_parallelism; ++i) {
+          int64_t start = i * block_size;
+          int64_t end = (i + 1) * block_size;
+          if (end > param.numel()) {
+            end = param.numel();
+          }
+          fs.push_back(framework::Async([&functor, start, end]() {
+            for (int64_t i = start; i < end; ++i) {
+              functor(i);
+            }
+          }));
+        }
+        for (size_t i = 0; i < fs.size(); ++i) fs[i].wait();
+      } else {
+        platform::ForRange<DeviceContext> for_range(
+            static_cast<const DeviceContext&>(ctx.device_context()),
+            param.numel());
+        for_range(functor);
+      }
     } else {
       PADDLE_THROW("Variable type not supported by adam_op");
     }

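The heart of the commit is the hunk above: when FLAGS_inner_op_parallelism is greater than 1 and the parameter holds more elements than FLAGS_min_param_size_to_use_multithread, the index range [0, param.numel()) is cut into blocks, each block is handed to framework::Async (which, given the new threadpool.h include, I take to run the closure on Paddle's thread pool), and the returned std::future handles are waited on before the kernel returns; otherwise the original single-threaded ForRange path is kept. Below is a minimal stand-alone sketch of that partition-and-wait pattern. std::async stands in for framework::Async, the ParallelForRange name and the toy update are mine rather than Paddle's, and the sketch lets the last block absorb any remainder left by the floor division.

#include <cstdint>
#include <future>
#include <iostream>
#include <vector>

// Apply functor(i) for every i in [0, n), split into n_threads blocks.
// Sketch only: std::async is a stand-in for framework::Async here.
template <typename Functor>
void ParallelForRange(int64_t n, int n_threads, Functor functor) {
  if (n_threads <= 1 || n < n_threads) {
    for (int64_t i = 0; i < n; ++i) functor(i);
    return;
  }
  std::vector<std::future<void>> fs;
  const int64_t block_size = n / n_threads;  // floor division
  for (int t = 0; t < n_threads; ++t) {
    const int64_t start = t * block_size;
    // Let the last block absorb the remainder so every index is visited.
    const int64_t end = (t == n_threads - 1) ? n : start + block_size;
    fs.push_back(std::async(std::launch::async, [functor, start, end]() {
      for (int64_t i = start; i < end; ++i) functor(i);
    }));
  }
  for (auto& f : fs) f.wait();  // join all blocks before returning
}

int main() {
  std::vector<double> param(10, 1.0);
  // Toy "optimizer step": scale each element, four blocks in parallel.
  ParallelForRange(static_cast<int64_t>(param.size()), 4,
                   [&param](int64_t i) { param[i] *= 0.5; });
  for (double v : param) std::cout << v << " ";
  std::cout << std::endl;  // prints ten 0.5 values
  return 0;
}
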
python/paddle/fluid/__init__.py

@@ -128,7 +128,8 @@ def __bootstrap__():
         'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size",
         'eager_delete_tensor_gb', 'fast_eager_deletion_mode',
         'allocator_strategy', 'reader_queue_speed_test_mode',
-        'print_sub_graph_dir', 'pe_profile_fname'
+        'print_sub_graph_dir', 'pe_profile_fname', 'inner_op_parallelism',
+        'min_param_size_to_use_multithread'
     ]
     if 'Darwin' not in sysstr:
         read_env_flags.append('use_pinned_memory')

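Listing the two new flag names in read_env_flags is what makes them settable from outside: when paddle.fluid is imported, __bootstrap__ forwards every name in that list to the C++ side, and, as far as I understand the bootstrap path, gflags' --tryfromenv mechanism then picks the values up from FLAGS_<name> environment variables (e.g. FLAGS_inner_op_parallelism=4). A small stand-alone C++ sketch of that environment-variable path in isolation, with made-up file and program names:

// sketch_env_flags.cc -- illustrates gflags' --tryfromenv lookup by itself.
#include <cstdio>
#include <cstdlib>
#include <gflags/gflags.h>

DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
DEFINE_int32(min_param_size_to_use_multithread, 0, "");

int main() {
  // Simulate the user exporting the flags before launching training,
  // e.g.  FLAGS_inner_op_parallelism=4 python train.py  (POSIX setenv).
  setenv("FLAGS_inner_op_parallelism", "4", /*overwrite=*/1);
  setenv("FLAGS_min_param_size_to_use_multithread", "100000", 1);

  // --tryfromenv=<names> asks gflags to copy each listed flag from the
  // FLAGS_<name> environment variable if it is set.
  int argc = 2;
  char arg0[] = "sketch_env_flags";
  char arg1[] =
      "--tryfromenv=inner_op_parallelism,min_param_size_to_use_multithread";
  char* argv_data[] = {arg0, arg1, nullptr};
  char** argv = argv_data;
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);

  std::printf("inner_op_parallelism = %d\n", FLAGS_inner_op_parallelism);
  std::printf("min_param_size_to_use_multithread = %d\n",
              FLAGS_min_param_size_to_use_multithread);
  return 0;
}
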