Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Commit 02149ed1
Authored Jul 12, 2019 by zhouwei25

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop

Parents: c2f71d1d, ff77dea9

Showing 6 changed files with 94 additions and 12 deletions (+94, -12)
paddle/fluid/framework/attribute.h                             +26  -0
paddle/fluid/framework/operator.cc                             +25  -9
paddle/fluid/framework/operator.h                               +3  -2
paddle/fluid/memory/allocation/allocator_facade.cc              +3  -1
paddle/fluid/pybind/pybind_boost_headers.h                      +9  -0
python/paddle/fluid/tests/unittests/test_fill_constant_op.py   +28  -0
未找到文件。
paddle/fluid/framework/attribute.h
@@ -133,6 +133,32 @@ struct ExtractAttribute<std::vector<int64_t>> {
   const std::string& attr_name_;
 };
 
+template <>
+struct ExtractAttribute<float> {
+  explicit ExtractAttribute(const std::string& attr_name)
+      : attr_name_(attr_name) {}
+
+  float* operator()(Attribute& attr) const {
+    if (attr.type() == typeid(int)) {  // NOLINT
+      int val = boost::get<int>(attr);
+      attr = static_cast<float>(val);
+    } else if (attr.type() == typeid(int64_t)) {  // NOLINT
+      int64_t val = boost::get<int64_t>(attr);
+      attr = static_cast<float>(val);
+    }
+    float* attr_value = nullptr;
+    try {
+      attr_value = &boost::get<float>(attr);
+    } catch (boost::bad_get& bad_get) {
+      PADDLE_THROW("Cannot get attribute %s by type float, its type is %s",
+                   attr_name_, paddle::platform::demangle(attr.type().name()));
+    }
+    return attr_value;
+  }
+
+  const std::string& attr_name_;
+};
+
 template <typename T>
 inline proto::AttrType AttrTypeID() {
   Attribute tmp = T();
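The new ExtractAttribute<float> specialization rewrites the stored Attribute in place: if the variant currently holds an int or int64_t, the value is promoted to float before a pointer to the float alternative is returned, so later reads see a consistent type. Below is a minimal, self-contained sketch of that coercion pattern; it uses std::variant and illustrative names instead of Paddle's boost-based Attribute, boost::get, and PADDLE_THROW.

// coerce_float_sketch.cc -- illustration only, not Paddle code.
// Mirrors the idea of ExtractAttribute<float>: promote int / int64_t held in a
// variant to float in place, then return a pointer to the float alternative.
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>

using Attr = std::variant<int, int64_t, float, std::string>;

float* ExtractFloat(Attr& attr, const std::string& attr_name) {
  if (std::holds_alternative<int>(attr)) {
    attr = static_cast<float>(std::get<int>(attr));
  } else if (std::holds_alternative<int64_t>(attr)) {
    attr = static_cast<float>(std::get<int64_t>(attr));
  }
  if (!std::holds_alternative<float>(attr)) {
    throw std::runtime_error("Cannot get attribute " + attr_name +
                             " by type float");
  }
  return &std::get<float>(attr);
}

int main() {
  Attr value = 3;  // the attribute was fed an int, e.g. from Python
  std::cout << *ExtractFloat(value, "value") << "\n";  // prints 3

  Attr bad = std::string("not a number");
  try {
    ExtractFloat(bad, "value");
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";  // analogous to the PADDLE_THROW branch
  }
  return 0;
}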
paddle/fluid/framework/operator.cc
@@ -885,12 +885,12 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
                                  const platform::Place& place) const {
   // To reduce the elapsed time of HasAttr, we use bool variable to record the
   // result of HasAttr.
-  if (!enable_cache_runtime_context && HasAttr(kEnableCacheRuntimeContext))
-    enable_cache_runtime_context = true;
-  if (!all_kernels_must_compute_runtime_shape &&
+  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
+    enable_cache_runtime_context_ = true;
+  if (!all_kernels_must_compute_runtime_shape_ &&
       HasAttr(kAllKernelsMustComputeRuntimeShape))
-    all_kernels_must_compute_runtime_shape = true;
-  if (!enable_cache_runtime_context) {
+    all_kernels_must_compute_runtime_shape_ = true;
+  if (!enable_cache_runtime_context_) {
     RuntimeContext ctx(Inputs(), Outputs(), scope);
     RunImpl(scope, place, &ctx);
   } else {
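The hunk above does two things: it renames the flags to the trailing-underscore member convention, and it keeps using them to memoize HasAttr so the attribute-name lookup is paid at most once per operator instance (this is what the "reduce the elapsed time of HasAttr" comment refers to). A rough sketch of that memoization pattern with stand-in names, not the actual OperatorWithKernel members:

// memoized_hasattr_sketch.cc -- illustration only, not Paddle code.
#include <iostream>
#include <set>
#include <string>
#include <utility>

class Op {
 public:
  explicit Op(std::set<std::string> attrs) : attrs_(std::move(attrs)) {}

  void Run() const {
    // Look the attribute up only while the cached flag is still false;
    // once it flips to true, later Run() calls skip the lookup entirely.
    if (!enable_cache_runtime_context_ && HasAttr("enable_cache_runtime_context"))
      enable_cache_runtime_context_ = true;
    std::cout << (enable_cache_runtime_context_ ? "cached path\n" : "uncached path\n");
  }

 private:
  bool HasAttr(const std::string& name) const { return attrs_.count(name) > 0; }

  std::set<std::string> attrs_;
  // mutable: Run() is const but may update the memoized flag.
  mutable bool enable_cache_runtime_context_ = false;
};

int main() {
  Op op({"enable_cache_runtime_context"});
  op.Run();  // performs the HasAttr lookup, flips the flag
  op.Run();  // reuses the cached flag
  return 0;
}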
@@ -931,7 +931,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
     dev_ctx = pool.Get(kernel_type_->place_);
   }
 
-  if (!all_kernels_must_compute_runtime_shape) {
+  if (!all_kernels_must_compute_runtime_shape_) {
     RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx);
     this->InferShape(&infer_shape_ctx);
   }
@@ -981,6 +981,13 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
       }
     }
   }
+
+  // To solve issue #15032, have a discussion with @Luotao for cpu inference,
+  // do not cache transfer scope, hence in this case delete transfer scope
+  // after run to avoid memory leak
+  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
+    scope.DeleteScope(transfer_scope);
+  }
 }
 
 void OperatorWithKernel::ChooseKernel(const RuntimeContext& ctx,
@@ -1114,9 +1121,18 @@ Scope* OperatorWithKernel::PrepareData(
   // If this op is not called by an Executor or ParallelExecutor, it should
   // called by a NaiveExecutor, the NaiveExecutor will cache the scopes and
   // variables, that behavior a lot different.
-  if (!run_by_executor_) {
+  //
+  // To solve issue #15032, have a discussion with @Luotao for cpu
+  // inference, for all cpu kernels cases without GPU participation, here
+  // not do transfer scope caching, and cpu inference performance is not
+  // impacted by test.
+  enable_cache_transfer_scope_ = false;
+  if (!run_by_executor_ &&
+      (platform::is_gpu_place(kernel_type_for_var.place_) ||
+       platform::is_gpu_place(expected_kernel_key.place_))) {
     new_scope = TryCreateTransferScope(kernel_type_for_var, expected_kernel_key,
                                        &scope);
+    enable_cache_transfer_scope_ = true;
   }
   if (!new_scope) {
     new_scope = &scope.NewScope();
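Read together with the DeleteScope hunk further up, the change above implements the policy discussed around issue #15032: the transfer scope is cached (enable_cache_transfer_scope_ = true) only for non-executor runs that actually involve a GPU place; for CPU-only inference outside an executor it is left uncached and deleted after the run, so it cannot leak. A simplified sketch of just that decision logic, with plain booleans standing in for the real place and scope types:

// transfer_scope_policy_sketch.cc -- illustration only, not Paddle code.
#include <iostream>

struct TransferScopePlan {
  bool cache_transfer_scope;  // analogous to enable_cache_transfer_scope_
  bool delete_after_run;      // analogous to the scope.DeleteScope(...) branch
};

TransferScopePlan Plan(bool run_by_executor, bool any_gpu_place) {
  TransferScopePlan p{};
  // Cache (and reuse) the transfer scope only for non-executor runs that
  // actually touch a GPU place.
  p.cache_transfer_scope = !run_by_executor && any_gpu_place;
  // CPU-only inference outside an executor: the scope is throwaway, so free
  // it after the run to avoid the leak described in issue #15032.
  p.delete_after_run = !run_by_executor && !p.cache_transfer_scope;
  return p;
}

int main() {
  TransferScopePlan cpu = Plan(/*run_by_executor=*/false, /*any_gpu_place=*/false);
  TransferScopePlan gpu = Plan(/*run_by_executor=*/false, /*any_gpu_place=*/true);
  std::cout << "cpu inference: cache=" << cpu.cache_transfer_scope
            << " delete_after_run=" << cpu.delete_after_run << "\n";
  std::cout << "gpu involved:  cache=" << gpu.cache_transfer_scope
            << " delete_after_run=" << gpu.delete_after_run << "\n";
  return 0;
}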
@@ -1125,11 +1141,11 @@ Scope* OperatorWithKernel::PrepareData(
   // each result of different input will be the same with the first one.
   // The reason is that if a gpu tensor is the input of a cpu kernel,
   // we will create a new cpu tensor in new scope.
-  // However, if enable_cache_runtime_context, we get the cpu tensor each
+  // However, if enable_cache_runtime_context_, we get the cpu tensor each
   // time, not the gpu tensor.
   // Thus, we set pre_scope_ = nullptr to trigger `new RuntimeContext()` in
   // RunImpl().
-  if (enable_cache_runtime_context) {
+  if (enable_cache_runtime_context_) {
     pre_scope_ = nullptr;
   }
paddle/fluid/framework/operator.h
@@ -499,9 +499,10 @@ class OperatorWithKernel : public OperatorBase {
   mutable std::unique_ptr<OpKernelFunc> kernel_func_;
   mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
   mutable const Scope* pre_scope_ = nullptr;
-  mutable bool enable_cache_runtime_context = false;
-  mutable bool all_kernels_must_compute_runtime_shape = false;
+  mutable bool enable_cache_runtime_context_ = false;
+  mutable bool all_kernels_must_compute_runtime_shape_ = false;
   mutable std::mutex cache_update_mutex_;
+  mutable bool enable_cache_transfer_scope_ = false;
 };
 
 extern bool OpSupportGPU(const std::string& op_type);
paddle/fluid/memory/allocation/allocator_facade.cc
@@ -295,7 +295,9 @@ class AllocatorFacadePrivate {
 // Pimpl. Make interface clean.
 AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
 
-AllocatorFacade::~AllocatorFacade() { delete m_; }
+// delete m_ may cause core dump when the destructor of python in conflict with
+// cpp.
+AllocatorFacade::~AllocatorFacade() {}
 
 AllocatorFacade& AllocatorFacade::Instance() {
   static AllocatorFacade instance;
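The destructor is deliberately emptied: AllocatorFacade is a function-local static singleton destroyed at process exit, and, as the new comment notes, deleting m_ there can core dump when the Python side has already been torn down, so the Pimpl is simply left for the OS to reclaim. A generic sketch of this "skip cleanup in a static singleton destructor" pattern; the names are illustrative, not Paddle's types:

// leaky_singleton_sketch.cc -- illustration only, not Paddle code.
#include <iostream>

class FacadeImpl {
 public:
  ~FacadeImpl() { std::cout << "impl destroyed\n"; }
};

class Facade {
 public:
  static Facade& Instance() {
    static Facade instance;  // destroyed at exit, in an unspecified order
    return instance;
  }

  // Intentionally do NOT `delete impl_` here: at process exit another runtime
  // (e.g. an embedded interpreter) may already be gone, and touching state it
  // owned can crash. Leaving the memory to the OS is the safer trade-off.
  ~Facade() {}

  FacadeImpl* impl() { return impl_; }

 private:
  Facade() : impl_(new FacadeImpl) {}
  FacadeImpl* impl_;
};

int main() {
  Facade::Instance();
  // "impl destroyed" never prints; the OS reclaims the allocation at exit.
  return 0;
}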
paddle/fluid/pybind/pybind_boost_headers.h
@@ -77,6 +77,15 @@ struct paddle_variant_caster<V<Ts...>> {
       }
     }
 
+    if (std::is_same<T, float>::value) {
+      auto caster_int64 = make_caster<int64_t>();
+      if (caster_int64.load(src, convert)) {
+        VLOG(4) << "this value are float and int64 satisfy simula.";
+        value = cast_op<int64_t>(caster_int64);
+        return true;
+      }
+    }
+
     value = cast_op<T>(caster);
     return true;
   }
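This branch makes the float alternative of the attribute variant also accept a value that loads cleanly as an int64_t, storing the exact integer in the variant; the ExtractAttribute<float> change in attribute.h then converts it to float when a kernel reads the attribute. The pybind11-free sketch below illustrates only that load ordering ("try the integral caster first, then fall back"); TryLoadInt64 and TryLoadFloat are hypothetical stand-ins for make_caster<T>().load(...), and strings stand in for Python objects:

// load_order_sketch.cc -- illustration only, not Paddle or pybind11 code.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <variant>

using Attr = std::variant<int64_t, float>;

// Hypothetical stand-in for make_caster<int64_t>().load(src, convert).
std::optional<int64_t> TryLoadInt64(const std::string& src) {
  try {
    size_t pos = 0;
    int64_t v = std::stoll(src, &pos);
    return pos == src.size() ? std::optional<int64_t>(v) : std::nullopt;
  } catch (...) {
    return std::nullopt;
  }
}

// Hypothetical stand-in for the float caster.
std::optional<float> TryLoadFloat(const std::string& src) {
  try {
    return std::stof(src);
  } catch (...) {
    return std::nullopt;
  }
}

// When the requested alternative is float, still prefer an exact int64 load so
// large integers (e.g. 10000000000 from the new tests) are carried through as
// int64 and converted to float only when the attribute is actually read.
std::optional<Attr> LoadAsFloatAttr(const std::string& src) {
  if (auto i = TryLoadInt64(src)) return Attr{*i};
  if (auto f = TryLoadFloat(src)) return Attr{*f};
  return std::nullopt;
}

int main() {
  auto a = LoadAsFloatAttr("10000000000");  // kept exactly, as int64_t
  auto b = LoadAsFloatAttr("3.5");          // falls back to float
  std::cout << a->index() << " " << b->index() << "\n";  // prints "0 1"
  return 0;
}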
python/paddle/fluid/tests/unittests/test_fill_constant_op.py
@@ -50,6 +50,34 @@ class TestFillConstantOp2(OpTest):
         self.check_output()
 
 
+class TestFillConstantOp3(OpTest):
+    def setUp(self):
+        '''Test fill_constant op with specified int64 value
+        '''
+        self.op_type = "fill_constant"
+
+        self.inputs = {}
+        self.attrs = {'shape': [123, 92], 'value': 10000000000}
+        self.outputs = {'Out': np.full((123, 92), 10000000000)}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestFillConstantOp4(OpTest):
+    def setUp(self):
+        '''Test fill_constant op with specified int value
+        '''
+        self.op_type = "fill_constant"
+
+        self.inputs = {}
+        self.attrs = {'shape': [123, 92], 'value': 3}
+        self.outputs = {'Out': np.full((123, 92), 3)}
+
+    def test_check_output(self):
+        self.check_output()
+
+
 class TestFillConstantOpWithSelectedRows(OpTest):
     def check_with_place(self, place):
         scope = core.Scope()