Unverified commit 4c9b3daf
Authored on Dec 04, 2019 by wangchaochaohu
Committed by GitHub on Dec 04, 2019
fill_constant_batch_size_like OP precision problem fix (#21337)

* fix fill_constant_batch_size_like_op precision problem test=develop
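As background on the precision problem: the fill value used to travel only through the op's 32-bit float `value` attribute, which cannot represent large int64 constants exactly. A minimal NumPy sketch (not Paddle code) of that rounding, using the constant that appears in the new tests:

    import numpy as np

    value = 1315454564656                 # int64 constant used in the new tests
    as_float32 = np.float32(value)        # what a 32-bit 'value' attribute can hold
    print(int(as_float32))                # 1315454582784 (a nearby float32), not the original
    print(int(as_float32) == value)       # False: the float-only attribute is lossy
    print(int(str(value)) == value)       # True: a string round-trip ('str_value') stays exact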
Parent: 46401786
Showing 5 changed files with 89 additions and 18 deletions (+89 / -18)
paddle/fluid/operators/fill_constant_batch_size_like_op.cc   +2 / -0
paddle/fluid/operators/fill_constant_batch_size_like_op.h    +36 / -10
python/paddle/fluid/layers/tensor.py   +13 / -8
python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py   +30 / -0
python/paddle/fluid/tests/unittests/test_layers.py   +8 / -0
paddle/fluid/operators/fill_constant_batch_size_like_op.cc
@@ -38,6 +38,8 @@ class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
         .SetDefault(framework::proto::VarType::FP32);
     AddAttr<float>("value", "default 0. The value to be filled")
         .SetDefault(0.0f);
+    AddAttr<std::string>("str_value", "default empty. The value to be filled")
+        .SetDefault("");
     AddAttr<bool>("force_cpu",
                   "(bool, default false) Force fill output variable to cpu "
                   "memory. Otherwise, fill output variable to the running "
paddle/fluid/operators/fill_constant_batch_size_like_op.h
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
+#include <string>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"

@@ -22,14 +23,15 @@ namespace operators {
 template <typename DeviceContext, typename T>
 class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
     auto data_type =
         static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype"));
-    auto value = ctx.Attr<float>("value");
+    auto float_value = ctx.Attr<float>("value");
+    auto str_value = ctx.Attr<std::string>("str_value");
     auto force_cpu = ctx.Attr<bool>("force_cpu");
     auto *out = ctx.Output<framework::Tensor>("Out");
     auto *in = ctx.Input<framework::LoDTensor>("Input");
     if (in->lod().size() && ctx.Attr<int>("input_dim_idx") == 0) {
       // set the correct batch size for the LoDTensor.
       auto odims = out->dims();

@@ -38,15 +40,39 @@ class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel<T> {
       out->mutable_data<T>(odims, ctx.GetPlace());
     }
-    if (force_cpu) {
-      out->mutable_data(platform::CPUPlace(), data_type);
-    } else {
-      out->mutable_data(ctx.GetPlace(), data_type);
-    }
+    T value;
+    if (str_value.empty()) {
+      value = static_cast<T>(float_value);
+    } else {
+      std::stringstream convert_stream(str_value);
+      if (std::is_same<int64_t, T>::value) {
+        int64_t tmp_value;
+        convert_stream >> tmp_value;
+        value = static_cast<T>(tmp_value);
+      } else {
+        double tmp_value;
+        convert_stream >> tmp_value;
+        value = static_cast<T>(tmp_value);
+      }
+    }
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(ctx.GetPlace());
-    math::set_constant(dev_ctx, out, value);
+    bool cpu_place = force_cpu || ctx.GetPlace() == platform::CPUPlace();
+    if (cpu_place) {
+      math::SetConstant<platform::CPUDeviceContext, T> functor;
+      out->mutable_data(platform::CPUPlace(), data_type);
+      functor(reinterpret_cast<const platform::CPUDeviceContext &>(dev_ctx),
+              out, static_cast<T>(value));
+    }
+#ifdef PADDLE_WITH_CUDA
+    if (!cpu_place) {
+      math::SetConstant<platform::CUDADeviceContext, T> functor;
+      out->mutable_data(ctx.GetPlace(), data_type);
+      functor(reinterpret_cast<const platform::CUDADeviceContext &>(dev_ctx),
+              out, static_cast<T>(value));
+    }
+#endif
   }
 };
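For readability, a hedged Python-only sketch of the value-selection logic the rewritten kernel follows (the helper `resolve_fill_value` is hypothetical, not Paddle code): the exact string form wins when it is provided, and integer types parse it as an integer instead of passing through a double.

    import numpy as np

    def resolve_fill_value(float_value, str_value, np_dtype):
        # Mirrors the kernel: fall back to the float attr only when no string is given.
        if str_value == "":
            return np_dtype(float_value)
        if np.issubdtype(np_dtype, np.integer):
            return np_dtype(int(str_value))    # analogous to the int64_t stringstream branch
        return np_dtype(float(str_value))      # analogous to the double branch

    print(resolve_fill_value(1e12, "1315454564656", np.int64))  # 1315454564656, exact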
python/paddle/fluid/layers/tensor.py
@@ -668,18 +668,23 @@ def fill_constant_batch_size_like(input,
     """
     helper = LayerHelper("fill_constant_batch_size_like", **locals())
     out = helper.create_variable_for_type_inference(dtype=dtype)
-    helper.append_op(
-        type='fill_constant_batch_size_like',
-        inputs={'Input': input},
-        outputs={'Out': [out]},
-        attrs={
-            'shape': shape,
-            'dtype': out.dtype,
-            'value': float(value),
-            'input_dim_idx': input_dim_idx,
-            'output_dim_idx': output_dim_idx,
-            'force_cpu': force_cpu or force_init_on_cpu()
-        })
+    attrs = {
+        'shape': shape,
+        'dtype': out.dtype,
+        'value': float(value),
+        'input_dim_idx': input_dim_idx,
+        'output_dim_idx': output_dim_idx,
+        'force_cpu': force_cpu or force_init_on_cpu()
+    }
+    if convert_dtype(dtype) in ['int64', 'int32']:
+        attrs['str_value'] = str(int(value))
+    else:
+        attrs['str_value'] = str(float(value))
+    helper.append_op(
+        type='fill_constant_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': [out]},
+        attrs=attrs)
     out.stop_gradient = True
     return out
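A short standalone illustration of how the attribute dict is now assembled at the Python layer (the `build_attrs` helper is a simplified stand-in for the code above, using a plain dtype string instead of convert_dtype): integer dtypes carry the exact value as 'str_value' while the float 'value' attr is kept for compatibility.

    def build_attrs(value, dtype):
        attrs = {'value': float(value)}            # float attr kept as before
        if dtype in ('int32', 'int64'):
            attrs['str_value'] = str(int(value))   # exact integer, no float32 rounding
        else:
            attrs['str_value'] = str(float(value))
        return attrs

    print(build_attrs(1315454564656, 'int64'))
    # {'value': 1315454564656.0, 'str_value': '1315454564656'}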
python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from op_test import OpTest

@@ -52,6 +53,20 @@ class TestFillConstantBatchSizeLikeWhenSecondDimIsBatchSize(OpTest):
         self.check_output()

+class TestFillConstantBatchSizeLikeInt64(OpTest):
+    def setUp(self):
+        self.op_type = "fill_constant_batch_size_like"
+        self.inputs = {'Input': np.random.random((219, 232)).astype("int64")}
+        self.attrs = {'value': 5894589485094, 'shape': [-1, 132, 7]}
+        out = np.random.random((219, 132, 7)).astype("int64")
+        out.fill(5894589485094)
+        self.outputs = {'Out': out}
+
+    def test_check_output(self):
+        self.check_output()
+
 class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest):
     def setUp(self):
         self.op_type = "fill_constant_batch_size_like"

@@ -74,5 +89,20 @@ class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest):
         self.check_output()

+# Test python API
+class TestFillConstantBatchSizeLikeAPI(OpTest):
+    def test_api(self):
+        like = fluid.layers.fill_constant(
+            shape=[1, 200], value=10, dtype='int64')
+        out = fluid.layers.fill_constant_batch_size_like(
+            input=like, shape=[2, 300], value=1315454564656, dtype='int64')
+
+        exe = fluid.Executor(place=fluid.CPUPlace())
+        res, = exe.run(fluid.default_main_program(), fetch_list=[out])
+        assert np.array_equal(
+            res[0], np.full([300], 1315454564656, dtype="int64"))
+
 if __name__ == "__main__":
     unittest.main()
python/paddle/fluid/tests/unittests/test_layers.py
@@ -2682,6 +2682,14 @@ class TestBook(LayerTest):
                 x, axes=axes, starts=starts, ends=ends, strides=strides)
             return out

+    def test_fill_constant_batch_size_like(self):
+        with self.static_graph():
+            like = fluid.layers.fill_constant(
+                shape=[1, 200], value=10, dtype='int64')
+            out = layers.fill_constant_batch_size_like(
+                input=like, shape=[2, 3300], value=1315454564656, dtype='int64')
+            return out
+
     def test_psroi_pool(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():