Commit 0be4b04d (unverified)
Authored April 18, 2020 by GaoWei8; committed by GitHub on April 18, 2020
Api (lod_append) error message enhancement (#23541)
Parent: 81c4def9
Showing 6 changed files with 129 additions and 39 deletions (+129, -39)
paddle/fluid/operators/concat_op.h                          +16 -14
paddle/fluid/operators/lod_reset_op.cc                       +3  -3
paddle/fluid/operators/lod_reset_op.h                       +13 -16
python/paddle/fluid/layers/nn.py                             +8  -5
python/paddle/fluid/tests/unittests/test_layers.py           +1  -1
python/paddle/fluid/tests/unittests/test_lod_append_op.py   +88  -0
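The per-file diffs follow. For orientation, here is a minimal sketch (not part of the commit) of the user-facing behaviour the change targets, written against the fluid static-graph API of this period; the names x, x_np and out are illustrative. With the checks added to lod_append, a non-Variable x is now rejected up front with a TypeError from check_variable_and_dtype, which is what the new unit test asserts.

import numpy as np
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    # Supported path: a float32 Variable plus a list-specified target LoD,
    # mirroring the new TestLoDAppendAPI test below.
    x = fluid.layers.data(name='x', shape=[6], dtype='float32')
    out = fluid.layers.lod_append(x, [0, 2, 6])

    # Rejected path: x_np is a raw numpy array rather than a Variable, so the
    # newly added check_variable_and_dtype call should raise TypeError.
    x_np = np.ones([6]).astype('float32')
    try:
        fluid.layers.lod_append(x_np, [0, 2, 6])
    except TypeError as e:
        print(e)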
paddle/fluid/operators/concat_op.h
@@ -47,13 +47,13 @@ static inline framework::DDim ComputeAndCheckShape(
           is_runtime || (out_dims[j] > 0 && inputs_dims[i][j] > 0);
       if (check_shape) {
         // check all shape in run time
         PADDLE_ENFORCE_EQ(
             inputs_dims[0][j], inputs_dims[i][j],
             platform::errors::InvalidArgument(
-                "The shape of input[%d] must be equal to input[0]. "
-                "But received input[0]'s shape = "
-                "[%s], input[%d]'s shape = [%s].",
-                i, inputs_dims[0], i, inputs_dims[i]));
+                "The %d-th dimension of input[0] and input[%d] "
+                "is expected to be equal."
+                "But received input[0]'s shape = "
+                "[%s], input[%d]'s shape = [%s].",
+                j, i, inputs_dims[0], i, inputs_dims[i]));
       }
     }
   }

@@ -79,9 +79,9 @@ class ConcatKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto ins = ctx.MultiInput<framework::LoDTensor>("X");
     framework::LoDTensor* out = ctx.Output<framework::LoDTensor>("Out");
     PADDLE_ENFORCE_NOT_NULL(ins[0],
                             platform::errors::NotFound(
-                                " The first input of concat should not be null."));
+                                "The first input tensor is not initalized."));
     auto axis = ctx.Attr<int>("axis");
     bool need_resize_out_dims = false;
     if (ctx.HasInput("AxisTensor")) {

@@ -116,7 +116,9 @@ class ConcatKernel : public framework::OpKernel<T> {
             platform::errors::Unimplemented(
                 "The lod level of all input LoDTensors should be same. "
                 "Maybe different lod level of input LoDTensors can concat,"
-                " it is not supported currently."));
+                "it is not supported currently. The lod level of %dth input "
+                "is %d and first input is %d.",
+                i, ins[i]->lod().size(), lod_size_0));
           } else {
             lod_size = 0;
             break;

@@ -181,9 +183,9 @@ class ConcatGradKernel : public framework::OpKernel<T> {
         }
       }
     }
     PADDLE_ENFORCE_NOT_NULL(ins[0],
                             platform::errors::NotFound(
-                                "The first input of concat should not be null."));
+                                "The first input tensor is not initalized."));
     auto axis = ctx.Attr<int>("axis");
     if (ctx.HasInput("AxisTensor")) {
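The reworded InvalidArgument above fires when inputs to concat disagree in a dimension other than the concat axis. A minimal sketch of provoking it from Python follows (not part of the commit; the tensor names are illustrative, and whether the error surfaces while building the graph or only at run time depends on how much of the shape is statically known).

import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    # Fully static shapes: dimension 1 differs (4 vs 5), which violates the
    # ComputeAndCheckShape check for concatenation along axis 0.
    a = fluid.layers.data(name='a', shape=[3, 4], dtype='float32', append_batch_size=False)
    b = fluid.layers.data(name='b', shape=[3, 5], dtype='float32', append_batch_size=False)
    try:
        fluid.layers.concat([a, b], axis=0)
    except Exception as e:
        print(e)  # expected to report both input shapes, per the enhanced message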
paddle/fluid/operators/lod_reset_op.cc
@@ -32,9 +32,9 @@ class LoDResetOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE_GT(
           static_cast<int64_t>(level0.size()), 0,
           platform::errors::InvalidArgument(
-              "If Input(Y) not provided, the target lod should be "
-              "specified by attribute `target_lod`. But the size of "
-              "`target_lod` is 0."));
+              "If Input(Y) is not provided, the output's LoD should be "
+              "specified by attribute 'target_lod'. But the size of "
+              "'target_lod' is 0."));
     } else if (ctx->IsRuntime()) {
       ctx->ShareLoD("Y", "Out");
     }
paddle/fluid/operators/lod_reset_op.h
@@ -41,10 +41,10 @@ class LoDResetKernel : public framework::OpKernel<T> {
       PADDLE_ENFORCE_EQ(
           static_cast<int64_t>(last_level.back()), in->dims()[0],
           platform::errors::InvalidArgument(
-              "The last value of `Y`'s last level LoD should be equal "
-              "to the first dimension of `X`. But received the last value of "
-              "`Y`'s last level LoD is %d, the first dimension of `X` is "
-              "%d.",
+              "The last value of Input(Y)'s last level LoD should be equal "
+              "to the first dimension of Input(X). But received the last "
+              "value of Input(Y)'s last level LoD is %d, the first dimension "
+              "of Input(X) is %d.",
               static_cast<int64_t>(last_level.back()), in->dims()[0]));
       out->set_lod(y_lod);
       return;  // early return, since lod already set

@@ -75,19 +75,16 @@ class LoDResetKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         static_cast<int64_t>(level0.back()), in->dims()[0],
         platform::errors::InvalidArgument(
-            "The last value of `Target LoD`'s last level LoD should be equal "
-            "to the first dimension of `X`. But received the last value of "
-            "`Target LoD`'s last level LoD is %d, the first dimension of `X` "
-            "is %d. ",
-            static_cast<int64_t>(level0.back()), in->dims()[0]));
+            "The last value of 'Target LoD''s last level LoD should be equal "
+            "to the first dimension of Input(X). But received the 'Target LoD' "
+            "is %s, Input(X)'s shape is is %s.",
+            framework::make_ddim(level0), in->dims()));
     for (size_t i = 0; i < level0.size() - 1; ++i) {
       PADDLE_ENFORCE_GE(
           level0[i + 1], level0[i],
           platform::errors::InvalidArgument(
-              "Target LoD should be an ascending vector. But the %s element is "
-              "%s and the %s element of Target LoD is %s.",
-              i + 1, level0[i + 1], i, level0[i]));
+              "'Target LoD' should be an ascending "
+              "vector. But received the Target LoD is %s.",
+              framework::make_ddim(level0)));
     }
     // cast level0 to size_t
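A minimal sketch (not part of the commit) of the run-time constraints behind the reworded 'Target LoD' messages: target_lod must end at Input(X)'s first dimension and must be ascending. The helper run_lod_reset below is illustrative only.

import numpy as np
import paddle.fluid as fluid

def run_lod_reset(target_lod):
    # Build and run a tiny program that resets the LoD of a 6-row input.
    prog = fluid.Program()
    with fluid.program_guard(prog):
        x = fluid.layers.data(name='x', shape=[1], dtype='float32')
        out = fluid.layers.lod_reset(x, target_lod=target_lod)
    exe = fluid.Executor(fluid.CPUPlace())
    x_np = np.ones([6, 1]).astype('float32')
    return exe.run(prog, feed={'x': x_np}, fetch_list=[out], return_numpy=False)[0]

# Last value 6 matches the 6 rows fed in, so the op succeeds.
print(run_lod_reset([0, 2, 6]).recursive_sequence_lengths())  # [[2, 4]]

# Last value 5 does not, so the kernel should raise the InvalidArgument
# error whose wording this commit adjusts.
try:
    run_lod_reset([0, 2, 5])
except Exception as e:
    print(e)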
python/paddle/fluid/layers/nn.py
@@ -6265,11 +6265,9 @@ def lod_reset(x, y=None, target_lod=None):
     helper = LayerHelper("lod_reset", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     if y is not None:
-        if y.lod_level > 0:
-            check_variable_and_dtype(
-                y, 'y', ['float32', 'float64', 'int32', 'int64'], 'lod_reset')
-        else:
-            check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'lod_reset')
+        check_type(y, 'y', (Variable), 'lod_reset')
+        if y.lod_level == 0:
+            check_variable_and_dtype(y, 'y', ['int32'], 'lod_reset')
         helper.append_op(
             type="lod_reset", inputs={'X': x,
                                       'Y': y}, outputs={'Out': out})

@@ -6327,6 +6325,9 @@ def lod_append(x, level):
     if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
         raise ValueError("Input(level) must be list, tuple or Variable.")
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'lod_append')
+
     helper = LayerHelper("lod_append", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)

@@ -6335,6 +6336,8 @@ def lod_append(x, level):
     if isinstance(level, Variable):
         inputs['Y'] = level
+        if level.lod_level == 0:
+            check_variable_and_dtype(level, 'level', ['int32'], 'lod_append')
     else:
         attrs['target_lod'] = level
     helper.append_op(
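A minimal sketch (not part of the commit) of what the Python-side checks added above accept and reject: x must be float32/float64/int32/int64, and a dense level Variable (lod_level == 0) now has to be int32, the same tightening that makes the test_layers.py case below switch 'lod_in' from int64 to int32. Names such as level_ok and level_bad are illustrative.

import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    x = fluid.layers.data(name='x', shape=[6], dtype='float32')

    # Accepted: a dense int32 Variable carrying the new LoD boundaries.
    level_ok = fluid.layers.data(name='level_ok', shape=[3], dtype='int32', lod_level=0)
    out = fluid.layers.lod_append(x, level_ok)

    # Rejected: a dense int64 level now fails check_variable_and_dtype.
    level_bad = fluid.layers.data(name='level_bad', shape=[3], dtype='int64', lod_level=0)
    try:
        fluid.layers.lod_append(x, level_bad)
    except TypeError as e:
        print(e)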
python/paddle/fluid/tests/unittests/test_layers.py
@@ -3033,7 +3033,7 @@ class TestBook(LayerTest):
             z = layers.lod_reset(x=x, y=y)
             self.assertTrue(z.lod_level == 2)
             # case 2
-            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int64')
+            lod_tensor_in = layers.data(name='lod_in', shape=[1], dtype='int32')
             z = layers.lod_reset(x=x, y=lod_tensor_in)
             self.assertTrue(z.lod_level == 1)
             # case 3
python/paddle/fluid/tests/unittests/test_lod_append_op.py
new file (mode 100644)
#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.op import Operator
from paddle.fluid.backward import append_backward


class TestLoDAppendAPI(unittest.TestCase):
    def test_api(self, use_cuda=False):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.layers.data(name='x', shape=[6], dtype='float32')
            result = fluid.layers.lod_append(x, [0, 2, 6])

            x_i = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]).astype("float32")
            for use_cuda in [False, True]:
                if use_cuda and not fluid.core.is_compiled_with_cuda():
                    return
                place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
                exe = fluid.Executor(place)
                [out] = exe.run(fluid.default_main_program(),
                                feed={'x': x_i},
                                fetch_list=[result],
                                return_numpy=False)
                self.assertEqual(out.recursive_sequence_lengths(), [[2, 4]])


class TestLodAppendOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):

            def test_x_Variable():
                # The input(x) must be Variable.
                x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
                level1 = [0, 2, 4]
                fluid.layers.lod_append(x1, level1)
                self.assertRaises(TypeError, fluid.layers.lod_append, x1,
                                  level1)

            def test_level_Variable():
                # The input(level) must be Variable or list.
                x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
                level2 = 2
                fluid.layers.lod_append(x2, level2)
                self.assertRaises(TypeError, fluid.layers.lod_append, x2,
                                  level2)

            def test_x_dtype():
                for dtype in ["bool", "float16"]:
                    x3 = fluid.layers.data(
                        name='x3_' + dtype, shape=[4], dtype=dtype)
                    level3 = fluid.layers.data(
                        name='level3', shape=[4], dtype='int32', lod_level=2)
                    self.assertRaises(TypeError, fluid.layers.lod_append, x3,
                                      level3)

            def test_level_dtype():
                for dtype in ["bool", "float16", "float32", "float64", "int64"]:
                    x4 = fluid.layers.data(
                        name='x4_' + dtype, shape=[4], dtype='float32')
                    level4 = fluid.layers.data(
                        name='level4', shape=[4], dtype=dtype, lod_level=0)
                    self.assertRaises(TypeError, fluid.layers.lod_append, x4,
                                      level4)


if __name__ == "__main__":
    unittest.main()