Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 2d807f2b
Authored September 11, 2017 by qijun

    init refine op python tests

Parent: c1696696

Showing 23 changed files with 267 additions and 423 deletions (+267 −423)
python/paddle/v2/framework/tests/CMakeLists.txt                  +0   -2
python/paddle/v2/framework/tests/op_test.py                      +19  -9
python/paddle/v2/framework/tests/test_add_two_op.py              +10  -13
python/paddle/v2/framework/tests/test_cos_sim_op.py              +14  -35
python/paddle/v2/framework/tests/test_cross_entropy_op.py        +1   -1
python/paddle/v2/framework/tests/test_fill_zeros_like_op.py      +10  -9
python/paddle/v2/framework/tests/test_gather_op.py               +11  -21
python/paddle/v2/framework/tests/test_gaussian_random_op.py      +4   -4
python/paddle/v2/framework/tests/test_gradient_checker.py        +22  -20
python/paddle/v2/framework/tests/test_lookup_table.py            +10  -19
python/paddle/v2/framework/tests/test_mean_op.py                 +10  -14
python/paddle/v2/framework/tests/test_minus_op.py                +8   -15
python/paddle/v2/framework/tests/test_mul_op.py                  +27  -71
python/paddle/v2/framework/tests/test_net.py                     +1   -1
python/paddle/v2/framework/tests/test_rowwise_add_op.py          +28  -45
python/paddle/v2/framework/tests/test_scale_and_identity_op.py   +16  -25
python/paddle/v2/framework/tests/test_scatter_op.py              +12  -25
python/paddle/v2/framework/tests/test_sgd_op.py                  +9   -8
python/paddle/v2/framework/tests/test_softmax_op.py              +10  -18
python/paddle/v2/framework/tests/test_squared_l2_distance_op.py  +29  -47
python/paddle/v2/framework/tests/test_sum_op.py                  +6   -6
python/paddle/v2/framework/tests/test_top_k_op.py                +6   -11
python/paddle/v2/framework/tests/test_uniform_random_op.py       +4   -4
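Every test module below follows the same refactor: the old OpTestMeta metaclass tests and the separate GradientChecker classes are folded into a single subclass of the new OpTest base from op_test.py, which sets self.op_type (instead of self.type) and calls self.check_output() / self.check_grad() directly. A minimal sketch of the migrated shape, distilled from the diffs below; the op name and shapes here are illustrative, not taken from any one file:

    import numpy as np
    from op_test import OpTest


    class TestSomeOp(OpTest):  # hypothetical op, for illustration only
        def setUp(self):
            self.op_type = "some_op"  # was: self.type = "some_op" under OpTestMeta
            self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
            self.outputs = {'Out': self.inputs['X']}

        def test_check_output(self):
            self.check_output()

        def test_check_grad(self):
            # replaces a separate GradientChecker subclass that called
            # check_grad(op, inputs, ["X"], "Out")
            self.check_grad(['X'], 'Out')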
python/paddle/v2/framework/tests/CMakeLists.txt

@@ -19,8 +19,6 @@ py_test(test_scatter_op SRCS test_scatter_op.py)
 py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
 py_test(test_top_k_op SRCS test_top_k_op.py)
 py_test(gradient_checker SRCS gradient_checker.py)
 py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
 py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
(two py_test registrations removed in this hunk: +0 −2)
python/paddle/v2/framework/tests/op_test.py

@@ -9,7 +9,7 @@ def grad_var_name(var_name):
     return var_name + "@GRAD"


-def create_op(scope, op_type, inputs, outputs, attrs=None):
+def create_op(scope, op_type, inputs, outputs, attrs):
     kwargs = dict()

     for in_name, in_dup in Operator.get_op_inputs(op_type):

@@ -29,14 +29,15 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
         kwargs[out_name] = []
         if out_dup:
             sub_in = outputs[out_name]
-            for sun_in_name in sub_in:
-                var = scope.new_var(sun_in_name)
-                kwargs[out_name].append(sun_in_name)
+            for sub_in_name in sub_in:
+                var = scope.new_var(sub_in_name)
+                kwargs[out_name].append(sub_in_name)
         else:
             var = scope.new_var(out_name)
             kwargs[out_name].append(out_name)

     for attr_name in Operator.get_op_attr_names(op_type):
+        if attr_name in attrs:
             kwargs[attr_name] = attrs[attr_name]

     return Operator(op_type, **kwargs)

@@ -89,6 +90,7 @@ def get_numeric_gradient(scope,
                          delta=0.005,
                          in_place=False):
+    print "before set input"
     set_input(scope, op, inputs, core.CPUPlace())
     op.infer_shape(scope)

@@ -110,7 +112,7 @@ def get_numeric_gradient(scope,
     # we use a for loop to compute the gradient of every element.
     for i in xrange(tensor_size):
         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
         # get one input element throw it's index i.
         origin = tensor_to_check.get_float_element(i)

@@ -120,7 +122,7 @@ def get_numeric_gradient(scope,
         y_pos = get_output()

         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
         x_neg = origin - delta
         tensor_to_check.set_float_element(i, x_neg)

@@ -168,7 +170,11 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place,
 class OpTest(unittest.TestCase):
     def check_output_with_place(self, place):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
             return
         set_input(self.scope, self.op, self.inputs, place)

@@ -227,7 +233,11 @@ class OpTest(unittest.TestCase):
                    in_place=False,
                    max_relative_error=0.005):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if no_grad_set is None:
             no_grad_set = set()
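With the signature changes above, the helpers in op_test.py are now driven with an explicit scope, and attrs becomes a required positional argument. A hedged usage sketch that mirrors the call pattern in the migrated test_gradient_checker.py further down (variable names are illustrative):

    import numpy as np
    import paddle.v2.framework.core as core
    from op_test import create_op, get_numeric_gradient

    x = np.random.random((10, 1)).astype("float32")
    y = np.random.random((10, 1)).astype("float32")

    scope = core.Scope()
    # attrs is now required; pass an empty dict when the op takes none
    add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': x + y}, dict())
    # the numeric-gradient helper also takes the scope as its first argument
    arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')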
python/paddle/v2/framework/tests/test_add_two_op.py

 import unittest
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
-from op_test_util import OpTestMeta
+import numpy as np
+from op_test import OpTest


-class TestAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestAddOp(OpTest):
     def setUp(self):
-        self.type = "add"
+        self.op_type = "add"
         self.inputs = {
-            'X': numpy.random.random((102, 105)).astype("float32"),
-            'Y': numpy.random.random((102, 105)).astype("float32")
+            'X': np.random.random((102, 105)).astype("float32"),
+            'Y': np.random.random((102, 105)).astype("float32")
         }
         self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

+    def test_check_output(self):
+        self.check_output()


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_cos_sim_op.py

 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest


-class TestCosSimOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestCosSimOp(OpTest):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64)).astype("float32"),
-            'Y': np.random.random((32, 64)).astype("float32")
+            'X': np.random.random((10, 5)).astype("float32"),
+            'Y': np.random.random((10, 5)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)

@@ -23,38 +20,20 @@ class TestCosSimOp(unittest.TestCase):
             'Out': np.expand_dims(expect_out, 1)
         }

+    def test_check_output(self):
+        self.check_output()

-class TestCosSimGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((10, 5)).astype("float32"),
-            'Y': np.random.random((10, 5)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05)
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)

-    def test_ignore_x(self):
+    def test_check_grad_ingore_x(self):
         self.check_grad(
-            self.op, self.inputs, ["Y"], "Out",
-            max_relative_error=0.05, no_grad_set={"X"})
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))

-    def test_ignore_y(self):
+    def test_check_grad_ignore_y(self):
         self.check_grad(
-            self.op, self.inputs, ["X"], "Out",
-            max_relative_error=0.05, no_grad_set={"Y"})
+            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_cross_entropy_op.py

@@ -21,7 +21,7 @@ class TestCrossEntropy(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(["X"], "Y")
+        self.check_grad(['X'], 'Y')


 if __name__ == "__main__":
python/paddle/v2/framework/tests/test_fill_zeros_like_op.py

 import unittest
-from op_test_util import OpTestMeta
-import numpy
+import numpy as np
+from op_test import OpTest


-class TestFillZerosLikeOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestFillZerosLikeOp(OpTest):
     def setUp(self):
-        self.type = "fill_zeros_like"
-        self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")}
-        self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])}
+        self.op_type = "fill_zeros_like"
+        self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
+        self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}
+
+    def test_check_output(self):
+        self.check_output()


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_gather_op.py

 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest


-class TestGatherOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestGatherOp(OpTest):
     def setUp(self):
-        self.type = "gather"
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        self.inputs = {
-            'X': xnp,
-            'Index': numpy.array([1, 3, 5]).astype("int32")
-        }
-        self.outputs = {'Out': self.inputs['X'][self.inputs['Index']]}
+        self.op_type = "gather"
+        xnp = np.random.random((10, 20)).astype("float32")
+        self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")}
+        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

+    def test_check_output(self):
+        self.check_output()

-class TestGatherGradOp(GradientChecker):
-    def test_gather_grad(self):
-        op = create_op("gather")
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')


 if __name__ == "__main__":
python/paddle/v2/framework/tests/test_gaussian_random_op.py

@@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase):
     def gaussian_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("Out").get_tensor()
+        scope.new_var('Out').get_tensor()

         op = Operator(
             "gaussian_random",
-            Out="Out",
+            Out='Out',
             dims=[1000, 784],
             mean=.0,
             std=1.,

@@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         context = core.DeviceContext.create(place)
         op.run(scope, context)
-        tensor = numpy.array(scope.find_var("Out").get_tensor())
+        tensor = numpy.array(scope.find_var('Out').get_tensor())
         self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
         self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_gradient_checker.py

 import unittest
-import numpy
-from paddle.v2.framework.op import Operator
-from gradient_checker import GradientChecker
-from gradient_checker import get_numeric_gradient
+import numpy as np
+import paddle.v2.framework.core as core
+from op_test import get_numeric_gradient
+from op_test import create_op


 class GetNumericGradientTest(unittest.TestCase):
     def test_add_op(self):
-        add_op = Operator("add", X="X", Y="Y", Out="Z")
-        x = numpy.random.random((10, 1)).astype("float32")
-        y = numpy.random.random((10, 1)).astype("float32")
-
-        arr = get_numeric_gradient(add_op, {"X": x, "Y": y}, "Z", "X")
+        x = np.random.random((10, 1)).astype("float32")
+        y = np.random.random((10, 1)).astype("float32")
+        z = x + y
+        scope = core.Scope()
+        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
+        arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')
         self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)

     def test_softmax_op(self):
         def stable_softmax(x):
             """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - numpy.max(x)
-            exps = numpy.exp(shiftx)
-            return exps / numpy.sum(exps)
+            shiftx = x - np.max(x)
+            exps = np.exp(shiftx)
+            return exps / np.sum(exps)

         def label_softmax_grad(Y, dY):
             dX = Y * 0.0
             for i in range(Y.shape[0]):
-                d = numpy.dot(Y[i, :], dY[i, :])
+                d = np.dot(Y[i, :], dY[i, :])
                 dX[i, :] = Y[i, :] * (dY[i, :] - d)
             return dX

-        softmax_op = Operator("softmax", X="X", Y="Y")
-
-        X = numpy.random.random((2, 2)).astype("float32")
-        Y = numpy.apply_along_axis(stable_softmax, 1, X)
-        dY = numpy.ones(Y.shape)
+        X = np.random.random((2, 2)).astype("float32")
+        Y = np.apply_along_axis(stable_softmax, 1, X)
+        dY = np.ones(Y.shape)
         dX = label_softmax_grad(Y, dY)

-        arr = get_numeric_gradient(softmax_op, {"X": X}, "Y", "X")
-        numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)
+        scope = core.Scope()
+        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
+        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
+        np.testing.assert_almost_equal(arr, dX, decimal=1e-2)


 if __name__ == "__main__":
python/paddle/v2/framework/tests/test_lookup_table.py

 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest


-class TestLookupTableOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestLookupTableOp(OpTest):
     def setUp(self):
-        self.type = 'lookup_table'
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
+        self.op_type = "lookup_table"
+        table = np.random.random((17, 31)).astype("float32")
+        ids = np.random.randint(0, 17, 4).astype("int32")
         self.inputs = {'W': table, 'Ids': ids}
         self.outputs = {'Out': table[ids]}

+    def test_check_output(self):
+        self.check_output()

-class TestLookupTableGradOp(GradientChecker):
-    def test_grad(self):
-        op = create_op('lookup_table')
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
-        inputs = {'W': table, 'Ids': ids}
-        # comapre gradients
-        self.compare_grad(op, inputs, set(['Ids']))
-        # check gradients
-        self.check_grad(op, inputs, set('W'), 'Out')
+    def test_check_grad(self):
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_mean_op.py

 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest


-class TestMeanOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMeanOp(OpTest):
     def setUp(self):
-        self.type = "mean"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
-        self.outputs = {'Out': np.mean(self.inputs['X'])}
+        self.op_type = "mean"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs["X"])}

+    def test_check_output(self):
+        self.check_output()

-class MeanGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("mean")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_checkout_grad(self):
+        self.check_grad(['X'], 'Out')


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_minus_op.py

 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest


-class MinusOpTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class MinusOpTest(OpTest):
     def setUp(self):
-        self.type = "minus"
+        self.op_type = "minus"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((32, 84)).astype("float32")
         }
         self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}

+    def test_check_output(self):
+        self.check_output()

-class MinusGradTest(GradientChecker):
-    def test_left(self):
-        op = create_op("minus")
-        inputs = {
-            "X": np.random.random((10, 10)).astype("float32"),
-            "Y": np.random.random((10, 10)).astype("float32")
-        }
-        self.check_grad(op, inputs, ["X", 'Y'], "Out")
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_mul_op.py

 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
-from paddle.v2.framework.op import Operator
+from op_test import OpTest


-class TestMulOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMulOp(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))

-class TestMulOp2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+
+class TestMulOp2(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((15, 4, 12, 10)).astype("float32"),
             'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")

@@ -32,72 +40,20 @@ class TestMulOp2(unittest.TestCase):
             self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
         }

+    def test_check_output(self):
+        self.check_output()

-class TestMulGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("mul")
-        self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'Y': np.random.random((84, 100)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        # mul op will enlarge the relative error
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
-
-    def test_ignore_x(self):
-        self.check_grad(
-            self.op, self.inputs, ["Y"], "Out",
-            max_relative_error=0.5, no_grad_set={"X"})
-
-    def test_ignore_y(self):
-        self.check_grad(
-            self.op, self.inputs, ["X"], "Out",
-            max_relative_error=0.5, no_grad_set={"Y"})
-
-
-class TestMulGradTest2(GradientChecker):
-    def setUp(self):
-        self.op = Operator(
-            "mul", X="X", Y="Y", Out="Out", x_num_col_dims=2, y_num_col_dims=2)
-        self.inputs = {
-            "X": np.random.random((15, 4, 12, 10)).astype("float32"),
-            "Y": np.random.random((4, 30, 8, 2, 9)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

-    def test_ignore_x(self):
+    def test_check_grad_ingore_x(self):
         self.check_grad(
-            self.op, self.inputs, ["Y"], "Out",
-            max_relative_error=0.5, no_grad_set={"X"})
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))

-    def test_ignore_y(self):
+    def test_check_grad_ignore_y(self):
         self.check_grad(
-            self.op, self.inputs, ["X"], "Out",
-            max_relative_error=0.5, no_grad_set={"Y"})
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_net.py

@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}
         self.assertEqual(expected, "\n" + str(net))


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_rowwise_add_op.py

 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest


-class TestRowwiseAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
-    def setUp(self):
-        self.type = "rowwise_add"
-        self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'b': np.random.random(84).astype("float32")
-        }
-        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
-
-
-class TestRowwiseAddOp2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestRowwiseAddOp(OpTest):
     def setUp(self):
-        self.type = "rowwise_add"
+        self.op_type = "rowwise_add"
         self.inputs = {
-            'X': np.random.random((13, 6, 7, 8)).astype("float32"),
-            'b': np.random.random((7, 8)).astype("float32")
+            'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [10]).astype("float32")
         }
         self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

+    def test_check_output(self):
+        self.check_output()

-class TestRowwiseAddGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("rowwise_add")
-        self.inputs = {
-            "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [10]).astype("float32")
-        }
-
-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')

-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+    def test_check_grad_ingore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))

-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
+    def test_check_grad_ingore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))


-class TestRowwiseAddGradOp2(GradientChecker):
+class TestRowwiseAddOp2(OpTest):
     def setUp(self):
-        self.op = create_op("rowwise_add")
+        self.op_type = "rowwise_add"
         self.inputs = {
-            "X": np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [2, 5]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32")
         }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
+
+    def test_check_output(self):
+        self.check_output()

-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')

-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+    def test_check_grad_ignore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))

-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
+    def test_check_grad_ignore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_scale_and_identity_op.py

 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
-from paddle.v2.framework.op import Operator
+from op_test import OpTest


-class IdentityTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class IdentityTest(OpTest):
     def setUp(self):
-        self.type = "identity"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "identity"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.outputs = {'Out': self.inputs['X']}

+    def test_check_output(self):
+        self.check_output()

-class IdentityGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("identity")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')


-class ScaleTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class ScaleTest(OpTest):
     def setUp(self):
-        self.type = "scale"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "scale"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.attrs = {'scale': -2.3}
         self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

+    def test_check_output(self):
+        self.check_output()

-class ScaleGradTest(GradientChecker):
-    def test_normal(self):
-        op = Operator("scale", X="X", Out="Out", scale=3.2)
-        self.check_grad(op,
-                        {"X": np.random.random((10, 10)).astype("float32")},
-                        set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_scatter_op.py

 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest


-class TestScatterOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestScatterOp(OpTest):
     def setUp(self):
-        self.type = "scatter"
-        ref_np = numpy.ones((3, 3)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 3)).astype("float32")
-        output_np = numpy.copy(ref_np)
+        self.op_type = "scatter"
+        ref_np = np.ones((3, 3)).astype("float32")
+        index_np = np.array([1, 2]).astype("int32")
+        updates_np = np.random.random((2, 3)).astype("float32")
+        output_np = np.copy(ref_np)
         output_np[index_np] += updates_np
         self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
         self.outputs = {'Out': output_np}

+    def test_check_output(self):
+        self.check_output()

-class TestScatterGradOp(GradientChecker):
-    def test_scatter_grad(self):
-        op = create_op("scatter")
-        # test data setup
-        ref_np = numpy.ones((3, 10)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 10)).astype("float32")
-        output_np = numpy.copy(ref_np)
-        output_np[index_np] += updates_np
-        inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
-        self.check_grad(
-            op, inputs, set(["Updates", "Ref"]), "Out", in_place=True)
+    def test_check_grad(self):
+        self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)


 if __name__ == "__main__":
python/paddle/v2/framework/tests/test_sgd_op.py

 import unittest
-import numpy
-from op_test_util import OpTestMeta
+import numpy as np
+from op_test import OpTest


-class TestSGD(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSGD(OpTest):
     def setUp(self):
-        self.type = "sgd"
-        w = numpy.random.random((102, 105)).astype("float32")
-        g = numpy.random.random((102, 105)).astype("float32")
+        self.op_type = "sgd"
+        w = np.random.random((102, 105)).astype("float32")
+        g = np.random.random((102, 105)).astype("float32")
         lr = 0.1
         self.inputs = {'param': w, 'grad': g}
         self.attrs = {'learning_rate': lr}
         self.outputs = {'param_out': w - lr * g}

+    def test_check_output(self):
+        self.check_output()


 if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_softmax_op.py

 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest


 def stable_softmax(x):

@@ -13,26 +10,21 @@ def stable_softmax(x):
     return exps / np.sum(exps)


-class TestSoftmaxOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSoftmaxOp(OpTest):
     def setUp(self):
         self.type = "softmax"
-        self.inputs = {"X": np.random.random((10, 10)).astype("float32")}
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
+        }
         self.outputs = {
-            "Y": np.apply_along_axis(stable_softmax, 1, self.inputs["X"])
+            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
         }

+    def test_check_output(self):
+        self.check_output()

-class TestSoftmaxGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("softmax")
-        self.inputs = {
-            "X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")
-        }
-
-    def test_softmax_grad(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Y")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y')


 if __name__ == "__main__":
python/paddle/v2/framework/tests/test_squared_l2_distance_op.py

 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest


-class TestSquaredL2DistanceOp_f0(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSquaredL2DistanceOp_f0(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (32, 64)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
         output = sub_res * sub_res

@@ -20,15 +17,19 @@ class TestSquaredL2DistanceOp_f0(unittest.TestCase):
             'Out': np.expand_dims(output.sum(1), 1)
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

-class TestSquaredL2DistanceOp_f1(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+
+class TestSquaredL2DistanceOp_f1(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (1, 64)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
         output = sub_res * sub_res

@@ -37,53 +38,34 @@ class TestSquaredL2DistanceOp_f1(unittest.TestCase):
             'Out': np.expand_dims(output.sum(1), 1)
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

-class TestSquaredL2DistanceOp_f2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+
+class TestSquaredL2DistanceOp_f2(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64, 128)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (1, 64, 128)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
-        sub_res = sub_res.reshape((32, 64 * 128))
+        sub_res = sub_res.reshape((2, 3 * 4))
         output = sub_res * sub_res
         self.outputs = {
             'sub_result': sub_res,
             'Out': np.expand_dims(output.sum(1), 1)
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

-class TestSquaredL2DistanceGradOp(GradientChecker):
-    def test_squared_l2_distance_b0(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (2, 3)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
-
-    def test_squared_l2_distance_b1(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (1, 3)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
-
-    def test_squared_l2_distance_b2(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3, 4)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (1, 3, 4)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_sum_op.py

@@ -6,10 +6,10 @@ from op_test import OpTest
 class TestSumOp(OpTest):
     def setUp(self):
         self.op_type = "sum"
-        x0 = np.random.random((3, 4)).astype('float32')
-        x1 = np.random.random((3, 4)).astype('float32')
-        x2 = np.random.random((3, 4)).astype('float32')
-        self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}}
+        x0 = np.random.random((3, 4)).astype("float32")
+        x1 = np.random.random((3, 4)).astype("float32")
+        x2 = np.random.random((3, 4)).astype("float32")
+        self.inputs = {'X': {'x0': x0, 'x1': x1, 'x2': x2}}
         y = x0 + x1 + x2
         self.outputs = {'Out': y}

@@ -17,8 +17,8 @@ class TestSumOp(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(["x0"], "Out")
+        self.check_grad(['x0'], 'Out')


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_top_k_op.py

 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest


-class TestTopkOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestTopkOp(OpTest):
     def setUp(self):
-        self.type = "top_k"
+        self.op_type = "top_k"
         k = 1
         input = np.random.random((32, 84)).astype("float32")
         output = np.ndarray((32, k))

@@ -25,11 +22,9 @@ class TestTopkOp(unittest.TestCase):
         self.outputs = {'Out': output, 'Indices': indices}


-class TestTopkOp3d(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestTopkOp3d(OpTest):
     def setUp(self):
-        self.type = "top_k"
+        self.op_type = "top_k"
         k = 1
         input = np.random.random((32, 2, 84)).astype("float32")
         input_flat_2d = input.reshape(64, 84)

@@ -48,5 +43,5 @@ class TestTopkOp3d(unittest.TestCase):
         self.outputs = {'Out': output, 'Indices': indices}


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
python/paddle/v2/framework/tests/test_uniform_random_op.py

@@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase):
     def uniform_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("X").get_tensor()
+        scope.new_var('X').get_tensor()

         op = Operator(
             "uniform_random",
-            Out="X",
+            Out='X',
             dims=[1000, 784],
             min=-5.0,
             max=10.0,

@@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         ctx = core.DeviceContext.create(place)
         op.run(scope, ctx)
-        tensor = numpy.array(scope.find_var("X").get_tensor())
+        tensor = numpy.array(scope.find_var('X').get_tensor())
         self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()