PaddlePaddle / Paddle

Commit 9d46f443
Authored Sep 11, 2017 by qijun

fix attr bug in op_test and ensure order in duplicate inputs/outputs

Parent: faf827ba

Showing 2 changed files with 22 additions and 14 deletions (+22 -14):

python/paddle/v2/framework/tests/op_test.py      +21 -13
python/paddle/v2/framework/tests/test_sum_op.py   +1  -1
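The commit addresses two problems in the operator test helpers. First, duplicable inputs and outputs (several variables feeding one slot, such as the sum op's "X") were passed as plain dicts, so the order in which the helpers created variables depended on dict iteration order; they are now passed as ordered lists of (name, array) pairs. Second, create_op indexed the attrs dict unconditionally, which broke tests that define no attributes. Below is a minimal standalone sketch, not part of the commit, of why the ordered pair list matters; it assumes only numpy and the Python 2-era dict semantics these tests were written against (note the xrange in op_test.py).

# Minimal sketch (not from the commit): a dict gives no ordering guarantee for
# duplicable inputs, while a list of (name, array) pairs preserves the order
# in which the test declared them.
import numpy as np

x0 = np.zeros((3, 4), dtype='float32')
x1 = np.ones((3, 4), dtype='float32')

as_dict = {"x0": x0, "x1": x1}        # iteration order was arbitrary on Python 2
as_pairs = [("x0", x0), ("x1", x1)]   # iteration order is exactly the declared order

for name, arr in as_pairs:            # the helpers below unpack pairs like this
    print(name, arr.shape)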
python/paddle/v2/framework/tests/op_test.py
@@ -9,7 +9,7 @@ def grad_var_name(var_name):
     return var_name + "@GRAD"
 
 
-def create_op(scope, op_type, inputs, outputs, attrs=None):
+def create_op(scope, op_type, inputs, outputs, attrs):
     kwargs = dict()
 
     for in_name, in_dup in Operator.get_op_inputs(op_type):
@@ -17,7 +17,7 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
             kwargs[in_name] = []
             if in_dup:
                 sub_in = inputs[in_name]
-                for sub_in_name in sub_in:
+                for sub_in_name, arr in sub_in:
                     var = scope.new_var(sub_in_name)
                     kwargs[in_name].append(sub_in_name)
             else:
@@ -29,15 +29,16 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
             kwargs[out_name] = []
             if out_dup:
                 sub_in = outputs[out_name]
-                for sun_in_name in sub_in:
-                    var = scope.new_var(sun_in_name)
-                    kwargs[out_name].append(sun_in_name)
+                for sub_in_name, arr in sub_in:
+                    var = scope.new_var(sub_in_name)
+                    kwargs[out_name].append(sub_in_name)
             else:
                 var = scope.new_var(out_name)
                 kwargs[out_name].append(out_name)
 
     for attr_name in Operator.get_op_attr_names(op_type):
-        kwargs[attr_name] = attrs[attr_name]
+        if attr_name in attrs:
+            kwargs[attr_name] = attrs[attr_name]
 
     return Operator(op_type, **kwargs)
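A short hedged illustration of the attribute guard introduced in the hunk above: the helper name collect_attrs and the attribute names "scale" and "axis" are invented for the example. The point is only that indexing attrs[attr_name] for every registered attribute fails as soon as a test supplies no value for one of them, while the membership check copies exactly what the test set.

# Hypothetical helper mirroring the guarded loop above; the names are made up.
def collect_attrs(op_attr_names, attrs):
    kwargs = {}
    for attr_name in op_attr_names:
        if attr_name in attrs:       # skip attributes the test did not provide
            kwargs[attr_name] = attrs[attr_name]
    return kwargs

print(collect_attrs(["scale", "axis"], {"scale": 0.5}))  # -> {'scale': 0.5}
print(collect_attrs(["scale", "axis"], {}))              # -> {} instead of a KeyError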
@@ -46,10 +47,9 @@ def set_input(scope, op, inputs, place):
         if in_name in inputs:
             if in_dup:
                 sub_in = inputs[in_name]
-                for sub_in_name in sub_in:
+                for sub_in_name, arr in sub_in:
                     var = scope.find_var(sub_in_name)
                     tensor = var.get_tensor()
-                    arr = sub_in[sub_in_name]
                     tensor.set_dims(arr.shape)
                     tensor.set(arr, place)
             else:
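The hunk above drops the extra dict lookup (arr = sub_in[sub_in_name]) because each element of a duplicable input now already carries its array. The following stand-in is not Paddle code: it assumes a plain dict in place of core.Scope and numpy arrays in place of tensors, and only shows the looping pattern.

# Stand-in for set_input (assumption: a dict plays the role of the Scope and
# numpy arrays play the role of tensors; only the loop shape matters).
import numpy as np

def set_input_like(fake_scope, inputs):
    for in_name, value in inputs.items():
        if isinstance(value, list):            # duplicable input: ordered (name, array) pairs
            for sub_in_name, arr in value:
                fake_scope[sub_in_name] = np.asarray(arr)
        else:                                  # ordinary input: a single array
            fake_scope[in_name] = np.asarray(value)

scope = {}
set_input_like(scope, {"X": [("x0", np.zeros((2, 2))), ("x1", np.ones((2, 2)))]})
print(sorted(scope))                           # ['x0', 'x1']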
@@ -65,7 +65,7 @@ def set_output_grad(scope, op, outputs, place):
         if out_name in outputs:
             if out_dup:
                 sub_out = outputs[out_name]
-                for sub_out_name in sub_out:
+                for sub_out_name, arr in sub_out:
                     out_tensor = scope.find_var(sub_out_name).get_tensor()
                     grad_tensor = scope.new_var(grad_var_name(
                         sub_out_name)).get_tensor()
@@ -110,7 +110,7 @@ def get_numeric_gradient(scope,
     # we use a for loop to compute the gradient of every element.
     for i in xrange(tensor_size):
         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
 
         # get one input element throw it's index i.
         origin = tensor_to_check.get_float_element(i)
@@ -120,7 +120,7 @@ def get_numeric_gradient(scope,
         y_pos = get_output()
 
         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
 
         x_neg = origin - delta
         tensor_to_check.set_float_element(i, x_neg)
@@ -168,7 +168,11 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place,
 class OpTest(unittest.TestCase):
     def check_output_with_place(self, place):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
             return
         set_input(self.scope, self.op, self.inputs, place)
@@ -227,7 +231,11 @@ class OpTest(unittest.TestCase):
                    in_place=False,
                    max_relative_error=0.005):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if no_grad_set is None:
             no_grad_set = set()
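Both OpTest methods above now build the create_op arguments with hasattr fallbacks, so a test case may leave inputs, outputs, or (most commonly) attrs undefined. A hedged, self-contained illustration of that defaulting pattern, using a made-up FakeTest class rather than a real OpTest subclass:

# FakeTest is invented for illustration; it mimics a test case that defines
# inputs but no attrs, which the hasattr fallback turns into an empty dict.
class FakeTest(object):
    inputs = {"X": [("x0", 1.0), ("x1", 2.0)]}
    # no `attrs` attribute on purpose

t = FakeTest()
op_inputs = t.inputs if hasattr(t, "inputs") else dict()
op_outputs = t.outputs if hasattr(t, "outputs") else dict()
op_attrs = t.attrs if hasattr(t, "attrs") else dict()
print(op_inputs)   # {'X': [('x0', 1.0), ('x1', 2.0)]}
print(op_attrs)    # {}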
python/paddle/v2/framework/tests/test_sum_op.py
@@ -9,7 +9,7 @@ class TestSumOp(OpTest):
         x0 = np.random.random((3, 4)).astype('float32')
         x1 = np.random.random((3, 4)).astype('float32')
         x2 = np.random.random((3, 4)).astype('float32')
-        self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}}
+        self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
         y = x0 + x1 + x2
         self.outputs = {'Out': y}
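With the ordered pair format, the expected result of the sum op can be accumulated in the declared order. The numpy-only sketch below reproduces the test's setup without running the Paddle operator itself:

# Numpy-only reconstruction of the test setup (does not execute the Paddle
# sum op); it just shows how the ordered "X" entries are consumed.
import numpy as np

x0 = np.random.random((3, 4)).astype('float32')
x1 = np.random.random((3, 4)).astype('float32')
x2 = np.random.random((3, 4)).astype('float32')

inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
expected = sum(arr for _, arr in inputs["X"])   # x0 + x1 + x2, in declared order

assert np.allclose(expected, x0 + x1 + x2)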