Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 3e9319f3
Authored on Mar 18, 2019 by Xin Pan
add more imperative layer tests.
test=develop
Parent: 2579ade4
Showing 4 changed files with 66 additions and 8 deletions (+66 -8)
paddle/fluid/imperative/layer.cc  +2 -4
paddle/fluid/imperative/tracer.cc  +5 -4
python/paddle/fluid/layers/nn.py  +5 -0
python/paddle/fluid/tests/unittests/test_layers.py  +54 -0
paddle/fluid/imperative/layer.cc
@@ -214,10 +214,8 @@ framework::LoDTensor& VarBase::GradValue() {
 }
 
 std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
-  if (grad_op_descs_.empty() && backward_id_ <= 0) {
-    VLOG(3) << "op with no grad: " << Type();
-    return {};
-  }
+  PADDLE_ENFORCE(!grad_op_descs_.empty() || backward_id_ > 0,
+                 "%s has no backward implementation", Type());
 
   VLOG(3) << "apply op grad: " << Type();
   std::vector<framework::VariableValueMap> tmp_grad_outputs;
paddle/fluid/imperative/tracer.cc
@@ -46,11 +46,12 @@ void CreateGradOp(const framework::OpDesc& op_desc,
                    std::vector<framework::OpDesc*>* grad_op_descs,
                    std::unordered_map<std::string, std::string>* grad_to_var) {
   PADDLE_ENFORCE(grad_op_descs->empty());
-  std::vector<std::unique_ptr<framework::OpDesc>> descs =
-      framework::OpInfoMap::Instance()
-          .Get(op_desc.Type())
-          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
+  const framework::OpInfo& op_info =
+      framework::OpInfoMap::Instance().Get(op_desc.Type());
+  if (!op_info.grad_op_maker_) return;
+  std::vector<std::unique_ptr<framework::OpDesc>> descs =
+      op_info.GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
 
   for (auto& desc : descs) {
     grad_op_descs->emplace_back(desc.release());
   }
python/paddle/fluid/layers/nn.py
@@ -24,6 +24,7 @@ import inspect
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
 from ..framework import Variable, OpProtoHolder, _in_imperative_mode
+from ..imperative import base
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
 from .tensor import concat, assign

@@ -9138,6 +9139,10 @@ def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
     y = helper.kwargs.get('y', None)
+    if _in_imperative_mode():
+        x = base.to_variable(x)
+        y = base.to_variable(y)
+
     assert x is not None, 'x cannot be None in {}'.format(op_type)
     assert y is not None, 'y cannot be None in {}'.format(op_type)
     axis = helper.kwargs.get('axis', -1)
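The `_elementwise_op` change above means that while imperative mode is active, the elementwise layers accept plain numpy arrays and wrap them with `base.to_variable` internally. Below is a minimal usage sketch of that behaviour, not part of the commit; it assumes the 1.3-era imperative API (`fluid.imperative.guard()` and the variable's `_numpy()` method, both of which also appear in this commit's tests), and these names may differ in other releases.

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers

a = np.ones([3, 3], dtype='float32')
b = np.ones([3, 3], dtype='float32') * 2

with fluid.imperative.guard():
    # numpy inputs: _elementwise_op converts them via base.to_variable()
    # because _in_imperative_mode() is true inside this guard (assumption:
    # guard() enables imperative mode, as in the 1.3-era API).
    out = layers.elementwise_add(a, b)
    print(out._numpy())  # expected: a 3x3 array filled with 3.0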
python/paddle/fluid/tests/unittests/test_layers.py
@@ -174,6 +174,60 @@ class TestLayer(LayerTest):
             self.assertTrue(np.allclose(static_ret[i], static_ret2[i]))
             self.assertTrue(np.allclose(static_ret[i], dy_ret[i]._numpy()))
 
+    def test_elementwise_math(self):
+        n = np.ones([3, 3], dtype='float32')
+        n2 = np.ones([3, 3], dtype='float32') * 1.1
+        n3 = np.ones([3, 3], dtype='float32') * 2
+        n4 = np.ones([3, 3], dtype='float32') * 3
+        n5 = np.ones([3, 3], dtype='float32') * 4
+        n6 = np.ones([3, 3], dtype='float32') * 5
+
+        with self.static_graph():
+            t = layers.data(name='t', shape=[3, 3], dtype='float32')
+            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
+            t3 = layers.data(name='t3', shape=[3, 3], dtype='float32')
+            t4 = layers.data(name='t4', shape=[3, 3], dtype='float32')
+            t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
+            t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')
+
+            ret = layers.elementwise_add(t, t2)
+            ret = layers.elementwise_pow(ret, t3)
+            ret = layers.elementwise_div(ret, t4)
+            ret = layers.elementwise_sub(ret, t5)
+            ret = layers.elementwise_mul(ret, t6)
+
+            static_ret = self.get_static_graph_result(
+                feed={
+                    't': n,
+                    't2': n2,
+                    't3': n3,
+                    't4': n4,
+                    't5': n5,
+                    't6': n6
+                },
+                fetch_list=[ret])[0]
+
+        with self.dynamic_graph():
+            ret = layers.elementwise_add(n, n2)
+            ret = layers.elementwise_pow(ret, n3)
+            ret = layers.elementwise_div(ret, n4)
+            ret = layers.elementwise_sub(ret, n5)
+            dy_ret = layers.elementwise_mul(ret, n6)
+
+        self.assertTrue(
+            np.allclose(static_ret, dy_ret._numpy()),
+            '%s vs %s' % (static_ret, dy_ret._numpy()))
+
+    def test_elementwise_minmax(self):
+        n = np.ones([3, 3], dtype='float32')
+        n2 = np.ones([3, 3], dtype='float32') * 2
+
+        with self.dynamic_graph():
+            min_ret = layers.elementwise_min(n, n2)
+            max_ret = layers.elementwise_max(n, n2)
+
+        self.assertTrue(np.allclose(n, min_ret._numpy()))
+        self.assertTrue(np.allclose(n2, max_ret._numpy()))
 
 class TestBook(unittest.TestCase):
     def test_fit_a_line(self):
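For completeness, a hedged sketch of running only the two new test cases locally; it assumes the script is executed from python/paddle/fluid/tests/unittests so that test_layers is importable (CI typically runs the whole file instead).

import unittest
from test_layers import TestLayer  # assumes CWD is the unittests directory

# Build a suite containing only the two cases added by this commit.
suite = unittest.TestSuite()
suite.addTest(TestLayer('test_elementwise_math'))
suite.addTest(TestLayer('test_elementwise_minmax'))
unittest.TextTestRunner(verbosity=2).run(suite)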