Unverified commit 29c4fae1, authored on Apr 07, 2020 by wangchaochaohu, committed via GitHub on Apr 07, 2020.
Tensor value support (#23491)
* add support for a value Tensor input to the fill_constant Op
Parent commit: e8efaee9
8 changed files with 170 additions and 81 deletions (+170 / -81)
paddle/fluid/operators/fill_constant_op.cc  (+5, -10)
paddle/fluid/operators/fill_constant_op.h  (+16, -0)
paddle/fluid/operators/optimizers/adam_op.cc  (+0, -13)
paddle/fluid/operators/optimizers/adam_op.cu  (+8, -0)
paddle/fluid/operators/optimizers/adam_op.h  (+8, -0)
python/paddle/fluid/layers/tensor.py  (+29, -56)
python/paddle/fluid/layers/utils.py  (+49, -0)
python/paddle/fluid/tests/unittests/test_fill_constant_op.py  (+55, -2)
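The commit lets fill_constant take its fill value from a one-element Tensor (the new ValueTensor input) rather than only a Python scalar. A minimal static-graph sketch, adapted from the docstring example added in python/paddle/fluid/layers/tensor.py below; the result comments follow that example.

```python
import paddle.fluid as fluid

# Build the fill value as a 1-D, single-element Tensor; fill_constant routes it
# to the op's new ValueTensor input, which takes priority over the scalar attribute.
val = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)      # val = [2.0]
data = fluid.layers.fill_constant(shape=[2, 1], dtype='float32', value=val)  # data = [[2.0], [2.0]]
```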
paddle/fluid/operators/fill_constant_op.cc

@@ -48,16 +48,6 @@ class FillConstantOp : public framework::OperatorWithKernel {
        framework::proto::VarType::Type(ctx.Attr<int>("dtype")),
        ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensor" || var_name == "ShapeTensorList") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class FillConstantOpVarTypeInference : public framework::VarTypeInference {

@@ -80,6 +70,11 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
    AddAttr<std::vector<int64_t>>("shape",
                                  "(vector<int64_t>) The shape of the output")
        .SetDefault({});
    AddInput("ValueTensor",
             "(Tensor, optional) If provided, fill_constant Op will use this "
             "as value to set the output Tensor, this has a higher priority "
             "than attr(str_value), the shape of this tensor MUST BE [1].")
        .AsDispensable();
    AddInput("ShapeTensor",
             "(Tensor<int>), optional). The shape of the output."
             "It has a higher priority than Attr(shape).")
paddle/fluid/operators/fill_constant_op.h

@@ -99,6 +99,22 @@ class FillConstantKernel : public framework::OpKernel<T> {
        value = static_cast<T>(tmp_value);
      }
    }
    if (ctx.HasInput("ValueTensor")) {
      auto *value_tensor = ctx.Input<framework::Tensor>("ValueTensor");
      PADDLE_ENFORCE_EQ(
          value_tensor->numel(), 1,
          platform::errors::InvalidArgument(
              "When use Tensor as value to set Tensor value in fill_cosntant, "
              "value input(ValueTensor) size must be 1, but get %d",
              value_tensor->numel()));
      const T *tensor_data = value_tensor->data<T>();
      framework::Tensor cpu_tensor;
      if (platform::is_gpu_place(value_tensor->place())) {
        TensorCopySync(*value_tensor, platform::CPUPlace(), &cpu_tensor);
        tensor_data = cpu_tensor.data<T>();
      }
      value = tensor_data[0];
    }
    auto shape = GetShape(ctx);
    if (out_var->IsType<framework::LoDTensor>()) {
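For illustration only, a small pure-Python sketch of the precedence the kernel above applies. The helper name resolve_fill_value is hypothetical and not part of Paddle; it only mirrors the logic that a provided ValueTensor must hold exactly one element and overrides the scalar attribute (the real kernel additionally copies a GPU-resident ValueTensor to the CPU before reading it).

```python
import numpy as np

def resolve_fill_value(attr_value, value_tensor=None):
    # Hypothetical helper: mimics FillConstantKernel's value-resolution order.
    if value_tensor is None:
        return attr_value                          # fall back to the 'value' attribute
    value_tensor = np.asarray(value_tensor)
    if value_tensor.size != 1:                     # mirrors PADDLE_ENFORCE_EQ(numel, 1)
        raise ValueError("ValueTensor size must be 1, but got %d" % value_tensor.size)
    return value_tensor.reshape(-1)[0]             # ValueTensor takes priority

print(resolve_fill_value(3.8))                      # 3.8  (attribute used)
print(resolve_fill_value(3.8, np.array([2.0])))     # 2.0  (tensor overrides the attribute)
```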
paddle/fluid/operators/optimizers/adam_op.cc

@@ -42,19 +42,6 @@ void AdamOp::InferShape(framework::InferShapeContext *ctx) const {
                    platform::errors::NotFound(
                        "Input(Beta2Pow) of AdamOp should not be null."));
  if (ctx->IsRuntime() && ctx->HasInput("Beta1Tensor")) {
    auto beta1 = ctx->Inputs("Beta1Tensor");
    PADDLE_ENFORCE_EQ(beta1.size(), 1,
                      platform::errors::InvalidArgument(
                          "Input(Beta1Tensor) size must be 1"));
  }
  if (ctx->IsRuntime() && ctx->HasInput("Beta2Tensor")) {
    auto beta2 = ctx->Inputs("Beta2Tensor");
    PADDLE_ENFORCE_EQ(beta2.size(), 1,
                      platform::errors::InvalidArgument(
                          "Input(Beta2Tensor) size must be 1"));
  }
  PADDLE_ENFORCE_EQ(ctx->HasOutput("ParamOut"), true,
                    platform::errors::NotFound(
                        "Output(ParamOut) of AdamOp should not be null."));
paddle/fluid/operators/optimizers/adam_op.cu

@@ -151,11 +151,19 @@ class AdamOpCUDAKernel : public framework::OpKernel<T> {
    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto *beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta1Tensor) size must be 1, but get %d",
                            beta1_tensor->numel()));
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto *beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta2Tensor) size must be 1, but get %d",
                            beta2_tensor->numel()));
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
paddle/fluid/operators/optimizers/adam_op.h

@@ -406,11 +406,19 @@ class AdamOpKernel : public framework::OpKernel<T> {
    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto *beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta1Tensor) size must be 1, but get %d",
                            beta1_tensor->numel()));
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto *beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta2Tensor) size must be 1, but get %d",
                            beta2_tensor->numel()));
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
python/paddle/fluid/layers/tensor.py

@@ -550,8 +550,9 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
            If ``shape`` is an Variable, it should be an 1-D Tensor .
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
            be float16, float32, float64, int32, int64.
        value(float): The constant value used to initialize the Tensor to be created.
        force_cpu(True): data should be on CPU if it's true, default value is False.
        value(float16|float32|float64|int32|int64|Variable): The constant value used to initialize
            the Tensor to be created. If value is an Variable, it should be an 1-D Tensor.
        force_cpu(bool): data should be on CPU if it's true, default value is False.
        out(Variable, optional): Optional output which can be any created
            Variable that meets the requirements to store the result of operation.
            if out is None, a new Varibale will be create to store the result.

@@ -579,13 +580,21 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
          # attr shape is an Variable Tensor.
          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is an Variable Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """
    attrs = {'value': float(value), 'force_cpu': force_cpu}
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    inputs = {}
    attrs = {'force_cpu': force_cpu}
    if isinstance(value, Variable):
        inputs['ValueTensor'] = value
    else:
        attrs['str_value'] = str(float(value))
        attrs['value'] = float(value)
        if convert_dtype(dtype) in ['int64', 'int32']:
            attrs['str_value'] = str(int(value))
        else:
            attrs['str_value'] = str(float(value))

    if in_dygraph_mode():
        if isinstance(shape, (list, tuple)):

@@ -596,6 +605,13 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
            shape = list(shape.numpy().astype(int))
        if out is None:
            out = _varbase_creator(dtype=dtype)
        if isinstance(value, Variable):
            if convert_dtype(dtype) in ['int64', 'int32']:
                attrs['str_value'] = str(int(value.numpy()))
            else:
                attrs['str_value'] = str(float(value.numpy()))
        core.ops.fill_constant(out, 'value',
                               float(value), 'force_cpu', force_cpu, 'dtype',
                               out.dtype, 'str_value', attrs['str_value'],

@@ -608,55 +624,12 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
    inputs = {}
    attrs = {'value': float(value), 'force_cpu': force_cpu}
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))

    def _get_attr_shape(list_shape):
        attr_shape = []
        for idx, dim in enumerate(list_shape):
            if isinstance(dim, Variable):
                attr_shape.append(-1)
            else:
                attr_shape.append(dim)
        return attr_shape

    def _get_shape_tensor(list_shape):
        new_shape_tensor = []
        for idx, dim in enumerate(list_shape):
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                check_dtype(
                    dim.dtype, 'shape[' + str(idx) + ']', ['int32', 'int64'],
                    'fill_constant',
                    '(When type of shape in fill_constant is list or tuple.)')
                if convert_dtype(dim.dtype) == 'int64':
                    dim = cast(x=dim, dtype='int32')
                new_shape_tensor.append(dim)
            else:
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_shape_tensor.append(temp_out)
        return new_shape_tensor

    if isinstance(shape, Variable):
        shape.stop_gradient = True
        check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant',
                    '(When type of shape in fill_constant is Variable.)')
        if (convert_dtype(shape.dtype) == 'int64'):
            shape = cast(shape, 'int32')
        inputs["ShapeTensor"] = shape
    elif isinstance(shape, (list, tuple)):
        assert len(shape) > 0, (
            "The size of 'shape' in fill_constant can't be zero, "
            "but received %s." % len(shape))
        attrs["shape"] = _get_attr_shape(shape)
        if utils._contain_var(shape):
            inputs['ShapeTensorList'] = _get_shape_tensor(shape)

    inputs = utils._get_shape_tensor_inputs(
        inputs=inputs, helper=helper, attrs=attrs, shape=shape,
        op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
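A sketch of what the Python wrapper now hands to the operator depending on the type of value. This is illustrative only: FakeVariable stands in for fluid.framework.Variable, and build_fill_constant_args is a hypothetical helper that mirrors the branching shown above (a Variable becomes the ValueTensor input; a scalar stays in the value/str_value attributes).

```python
class FakeVariable(object):
    """Stand-in for fluid.framework.Variable, used only for this sketch."""
    pass

def build_fill_constant_args(value, dtype, force_cpu=False):
    # Hypothetical helper mirroring fill_constant's new input/attribute routing.
    inputs, attrs = {}, {'force_cpu': force_cpu}
    if isinstance(value, FakeVariable):
        inputs['ValueTensor'] = value            # tensor value -> op input
    else:
        attrs['value'] = float(value)            # scalar value -> op attributes
        attrs['str_value'] = (str(int(value)) if dtype in ('int32', 'int64')
                              else str(float(value)))
    return inputs, attrs

inputs, attrs = build_fill_constant_args(3, 'int32')
print(inputs, attrs)          # {} {'force_cpu': False, 'value': 3.0, 'str_value': '3'}

inputs, attrs = build_fill_constant_args(FakeVariable(), 'float32')
print(sorted(inputs), attrs)  # ['ValueTensor'] {'force_cpu': False}
```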
python/paddle/fluid/layers/utils.py

@@ -18,6 +18,8 @@ import copy
import six
import numpy as np
from ..framework import Variable
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..layer_helper import LayerHelper


def convert_to_list(value, n, name, dtype=np.int):

@@ -274,3 +276,50 @@ def _contain_var(list_or_tuple):
        if isinstance(item, Variable):
            return True
    return False


def _get_shape_tensor_inputs(inputs, helper, attrs, shape, op_type):
    from .tensor import fill_constant, cast

    def _get_attr_shape(list_shape):
        attr_shape = []
        for idx, dim in enumerate(list_shape):
            if isinstance(dim, Variable):
                attr_shape.append(-1)
            else:
                attr_shape.append(dim)
        return attr_shape

    def _get_shape_tensor(list_shape):
        new_shape_tensor = []
        for idx, dim in enumerate(list_shape):
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                check_dtype(
                    dim.dtype, 'shape[' + str(idx) + ']', ['int32', 'int64'],
                    op_type,
                    '(When type of shape in' + op_type + 'is list or tuple.)')
                if convert_dtype(dim.dtype) == 'int64':
                    dim = cast(x=dim, dtype='int32')
                new_shape_tensor.append(dim)
            else:
                temp_out = fill_constant([1], 'int32', dim, force_cpu=True)
                new_shape_tensor.append(temp_out)
        return new_shape_tensor

    if isinstance(shape, Variable):
        shape.stop_gradient = True
        check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant',
                    '(When type of shape in' + op_type + ' is Variable.)')
        if (convert_dtype(shape.dtype) == 'int64'):
            shape = cast(shape, 'int32')
        inputs["ShapeTensor"] = shape
    elif isinstance(shape, (list, tuple)):
        assert len(shape) > 0, (
            "The size of 'shape' in" + op_type + " can't be zero, "
            "but received %s." % len(shape))
        attrs["shape"] = _get_attr_shape(shape)
        if _contain_var(shape):
            inputs['ShapeTensorList'] = _get_shape_tensor(shape)
    return inputs
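As an illustration of what _get_shape_tensor_inputs produces for a mixed shape list, here is a simplified stand-alone sketch. It skips the dtype checks and the Variable-to-Tensor conversion done by the real helper and only shows the two outputs: the shape attribute with -1 placeholders for Variable dimensions, and a ShapeTensorList entry whenever the list contains a Variable. The names used below are hypothetical.

```python
class FakeVariable(object):
    """Stand-in for fluid.framework.Variable, used only for this sketch."""
    pass

def sketch_shape_inputs(shape):
    # Simplified view of utils._get_shape_tensor_inputs for a list/tuple shape.
    inputs, attrs = {}, {}
    attrs['shape'] = [-1 if isinstance(d, FakeVariable) else d for d in shape]
    if any(isinstance(d, FakeVariable) for d in shape):
        # The real helper builds int32 tensors here (casting int64 dims and
        # wrapping plain ints with fill_constant); we keep the raw items.
        inputs['ShapeTensorList'] = list(shape)
    return inputs, attrs

dim = FakeVariable()
inputs, attrs = sketch_shape_inputs([123, dim, 92])
print(attrs)            # {'shape': [123, -1, 92]}
print(sorted(inputs))   # ['ShapeTensorList']
```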
python/paddle/fluid/tests/unittests/test_fill_constant_op.py

@@ -212,6 +212,54 @@ class TestFillConstantOp1_ShapeTensor(OpTest):
        self.check_output()


# Situation 4: value is a tensor
class TestFillConstantOp1_ValueTensor(OpTest):
    def setUp(self):
        '''Test fill_constant op with specified value
        '''
        self.op_type = "fill_constant"
        self.init_data()
        self.inputs = {
            "ShapeTensor": np.array(self.shape).astype("int32"),
            'ValueTensor': np.array([self.value]).astype("float32")
        }
        self.attrs = {'value': self.value + 1.0}
        self.outputs = {'Out': np.full(self.shape, self.value)}

    def init_data(self):
        self.shape = [123, 92]
        self.value = 3.8
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output()


# Situation 5: value is a tensor
class TestFillConstantOp2_ValueTensor(OpTest):
    def setUp(self):
        '''Test fill_constant op with specified value
        '''
        self.op_type = "fill_constant"
        self.init_data()
        self.inputs = {
            "ShapeTensor": np.array(self.shape).astype("int32"),
            'ValueTensor': np.array([self.value]).astype("int32")
        }
        self.attrs = {'value': self.value, 'dtype': 2}
        self.outputs = {'Out': np.full(self.shape, self.value)}

    def init_data(self):
        self.shape = [123, 92]
        self.value = 3
        self.dtype = np.int32

    def test_check_output(self):
        self.check_output()


# Test python API
class TestFillConstantAPI(unittest.TestCase):
    def test_api(self):

@@ -242,14 +290,18 @@ class TestFillConstantAPI(unittest.TestCase):
        out_6 = fluid.layers.fill_constant(
            shape=shape_tensor_int64, dtype=np.float32, value=1.1)

        val = fluid.layers.fill_constant(shape=[1], dtype=np.float32, value=1.1)
        out_7 = fluid.layers.fill_constant(
            shape=shape_tensor_int64, dtype=np.float32, value=val)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
        res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
            fluid.default_main_program(),
            feed={
                "shape_tensor_int32": np.array([1, 2]).astype("int32"),
                "shape_tensor_int64": np.array([1, 2]).astype("int64"),
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6])
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])

        assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))

@@ -257,6 +309,7 @@ class TestFillConstantAPI(unittest.TestCase):
        assert np.array_equal(res_4, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
        assert np.array_equal(res_7, np.full([1, 2], 1.1, dtype="float32"))


class TestFillConstantOpError(unittest.TestCase):
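For completeness, a hedged end-to-end sketch of the new API path exercised by TestFillConstantAPI above: build the fill value as a one-element tensor, pass it as value, and run the program on CPU. It assumes a fluid (Paddle 1.x) environment matching this commit and uses a literal shape instead of the fed shape tensor from the test.

```python
import numpy as np
import paddle.fluid as fluid

# value supplied as a one-element tensor, mirroring out_7 / val in the test above
val = fluid.layers.fill_constant(shape=[1], dtype=np.float32, value=1.1)
out = fluid.layers.fill_constant(shape=[1, 2], dtype=np.float32, value=val)

exe = fluid.Executor(place=fluid.CPUPlace())
res, = exe.run(fluid.default_main_program(), fetch_list=[out])
assert np.array_equal(res, np.full([1, 2], 1.1, dtype="float32"))
```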