Commit 9218e742 — BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle), unverified
Authored by HongyuJia on Nov 30, 2022; committed via GitHub on Nov 30, 2022
Parent: 96a8bbe7

clean elem_arithmetic part5 unittest (#48466)

Showing 19 changed files with 57 additions and 60 deletions (+57 −60)
Changed files:
python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_dist_transpiler.py  (+4 −4)
python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py  (+8 −10)
python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py  (+3 −3)
python/paddle/fluid/tests/unittests/test_image_classification_layer.py  (+2 −1)
python/paddle/fluid/tests/unittests/test_imperative_basic.py  (+5 −5)
python/paddle/fluid/tests/unittests/test_imperative_deepcf.py  (+1 −1)
python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py  (+1 −1)
python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py  (+2 −4)
python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py  (+1 −1)
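As the commit title says, this part-5 cleanup replaces the deprecated fluid.layers.elementwise_* helpers in these unit tests with their paddle 2.x equivalents: elementwise_add → paddle.add, elementwise_sub → paddle.subtract, elementwise_mul → paddle.multiply, elementwise_div → paddle.divide. A minimal sketch of the mapping in dynamic-graph mode, with illustrative tensors that are not taken from any of the tests below:

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    y = paddle.to_tensor([4.0, 5.0, 6.0])

    # Each deprecated call on the right maps to the 2.x call used in this commit.
    paddle.add(x, y)       # was fluid.layers.elementwise_add(x, y)
    paddle.subtract(x, y)  # was fluid.layers.elementwise_sub(x, y)
    paddle.multiply(x, y)  # was fluid.layers.elementwise_mul(x, y)
    paddle.divide(x, y)    # was fluid.layers.elementwise_div(x, y)

The per-file diffs follow.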
python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py

@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py

@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py

@@ -42,7 +42,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -52,13 +52,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py

@@ -43,7 +43,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -53,13 +53,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py

@@ -42,7 +42,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -52,13 +52,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py

@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py

@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py

@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py

@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'

@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
python/paddle/fluid/tests/unittests/test_dist_transpiler.py

@@ -403,9 +403,9 @@ class TestFakeInit(TranspilerTest):
         neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])
-        true_logits = fluid.layers.elementwise_add(
+        true_logits = paddle.add(
             paddle.sum(
-                fluid.layers.elementwise_mul(input_emb, true_emb_w),
+                paddle.multiply(input_emb, true_emb_w),
                 dim=1,
                 keep_dim=True,
             ),

@@ -418,7 +418,7 @@ class TestFakeInit(TranspilerTest):
             input_emb_re, neg_emb_w_re, transpose_y=True
         )
         neg_matmul_re = paddle.reshape(neg_matmul, shape=[-1, neg_num])
-        neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
+        neg_logits = paddle.add(neg_matmul_re, neg_emb_b_vec)
         # nce loss
         label_ones = fluid.layers.fill_constant_batch_size_like(
             true_logits, shape=[-1, 1], value=1.0, dtype='float32'

@@ -433,7 +433,7 @@ class TestFakeInit(TranspilerTest):
         neg_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
             neg_logits, label_zeros
         )
-        cost = fluid.layers.elementwise_add(
+        cost = paddle.add(
             paddle.sum(true_xent, axis=1),
             paddle.sum(neg_xent, axis=1),
         )
python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py

@@ -169,7 +169,7 @@ def lm_model(
                 nn = layers.concat([input, pre_hidden], 1)
                 gate_input = layers.matmul(x=nn, y=weight_1)

-                gate_input = layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i = paddle.slice(
                     gate_input, axes=[1], starts=[0], ends=[hidden_size]
                 )

@@ -293,7 +293,7 @@ def lm_model(
                 nn = layers.concat([input, pre_hidden], 1)
                 gate_input = layers.matmul(x=nn, y=weight_1)

-                gate_input = layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
                 c = pre_cell * paddle.nn.functional.sigmoid(

@@ -460,7 +460,7 @@ def lm_model(
     )

     projection = layers.matmul(rnn_out, softmax_weight)
-    projection = layers.elementwise_add(projection, softmax_bias)
+    projection = paddle.add(projection, softmax_bias)
     projection = paddle.reshape(projection, shape=[-1, vocab_size])
     loss = layers.softmax_with_cross_entropy(
python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py

@@ -157,7 +157,7 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase):
             x_t = rnn.step_input(x)
             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )

@@ -328,9 +328,7 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1):
                 bias_attr=False,
             )

-            h = paddle.nn.functional.sigmoid(
-                x=layers.elementwise_add(x=temp_l, y=temp_r)
-            )
+            h = paddle.nn.functional.sigmoid(x=paddle.add(x=temp_l, y=temp_r))

             rnn.update_memory(h_pre, h)
             rnn.output(h)

@@ -504,7 +502,7 @@ class EagerDeletionRecurrentOpNoMemBootTest(EagerDeletionRecurrentOpTest1):
         with rnn.step():
             mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x)
             x_t = rnn.step_input(x)
-            mem = layers.elementwise_add(x=mem_pre, y=x_t)
+            mem = paddle.add(x=mem_pre, y=x_t)
             rnn.update_memory(mem_pre, mem)
             rnn.output(mem)

@@ -584,7 +582,7 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1):
         with rnn_0.step():
             x_t = rnn_0.step_input(x)
             mem_pre = rnn_0.memory(shape=[-1, self.input_dim], batch_ref=x)
-            mem = layers.elementwise_add(x=mem_pre, y=x_t)
+            mem = paddle.add(x=mem_pre, y=x_t)
             rnn_0.update_memory(mem_pre, mem)
             rnn_0.output(mem)

@@ -594,8 +592,8 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1):
             x_t = rnn_1.step_input(x)
             last_rnn_output = rnn_0()
             last_rnn_sum = paddle.sum(last_rnn_output)
-            mem = layers.elementwise_add(x=x_t, y=last_rnn_sum)
-            y = layers.elementwise_add(x=mem_pre, y=mem)
+            mem = paddle.add(x=x_t, y=last_rnn_sum)
+            y = paddle.add(x=mem_pre, y=mem)
             rnn_1.update_memory(mem_pre, mem)
             rnn_1.output(y)

         return rnn_1()

@@ -693,7 +691,7 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest(
             x_t = forward_only_rnn.step_input(x)
             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )

@@ -709,7 +707,7 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest(
             x_t = rnn.step_input(x)
             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )
python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py

@@ -94,7 +94,7 @@ class TestElementwiseAddDoubleGradCheck(unittest.TestCase):
         y = layers.data('y', shape, False, dtype)
         x.persistable = True
         y.persistable = True
-        out = layers.elementwise_add(x, y)
+        out = paddle.add(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

@@ -155,7 +155,7 @@ class TestElementwiseSubDoubleGradCheck(unittest.TestCase):
         y = layers.data('y', shape, False, dtype)
         x.persistable = True
         y.persistable = True
-        out = layers.elementwise_sub(x, y)
+        out = paddle.subtract(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

@@ -291,7 +291,7 @@ class TestElementwiseAddTripleGradCheck(unittest.TestCase):
         y = layers.data('y', shape, False, dtype)
         x.persistable = True
         y.persistable = True
-        out = layers.elementwise_add(x, y)
+        out = paddle.add(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
python/paddle/fluid/tests/unittests/test_image_classification_layer.py

@@ -14,6 +14,7 @@
 import unittest

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.nets as nets
 from paddle.fluid.framework import Program

@@ -81,7 +82,7 @@ class TestLayer(unittest.TestCase):
         image2 = fluid.layers.data(
             name='pixel2', shape=[3, 48, 48], dtype='float32'
         )
-        fluid.layers.elementwise_add(x=image1, y=image2, act='relu')
+        paddle.nn.functional.relu(paddle.add(x=image1, y=image2))
         print(main_program)
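This file also drops the fused act='relu' argument that the old elementwise call accepted; in the 2.x API the activation is applied explicitly after the arithmetic op. A minimal dynamic-graph sketch of the equivalent call, with tensor names that are illustrative and not taken from the test:

    import paddle

    a = paddle.rand([3, 48, 48])
    b = paddle.rand([3, 48, 48])

    # relu(a + b): the activation is now an explicit call instead of act='relu'.
    out = paddle.nn.functional.relu(paddle.add(a, b))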
python/paddle/fluid/tests/unittests/test_imperative_basic.py

@@ -33,7 +33,7 @@ class MyLayer(fluid.Layer):
     def forward(self, inputs):
         x = fluid.layers.relu(inputs)
         self._x_for_debug = x
-        x = fluid.layers.elementwise_mul(x, x)
+        x = paddle.multiply(x, x)
         x = paddle.sum(x)
         return [x]

@@ -722,9 +722,9 @@ class TestImperative(unittest.TestCase):
             inp1 = paddle.to_tensor(np_inp1)
             inp2 = paddle.to_tensor(np_inp2)
             if np.sum(np_inp1) < np.sum(np_inp2):
-                x = fluid.layers.elementwise_add(inp1, inp2)
+                x = paddle.add(inp1, inp2)
             else:
-                x = fluid.layers.elementwise_sub(inp1, inp2)
+                x = paddle.subtract(inp1, inp2)
             dygraph_result = x.numpy()

         # static graph

@@ -750,13 +750,13 @@ class TestImperative(unittest.TestCase):
             with ie.true_block():
                 d1 = ie.input(inp_data1)
                 d2 = ie.input(inp_data2)
-                d3 = fluid.layers.elementwise_add(d1, d2)
+                d3 = paddle.add(d1, d2)
                 ie.output(d3)

             with ie.false_block():
                 d1 = ie.input(inp_data1)
                 d2 = ie.input(inp_data2)
-                d3 = fluid.layers.elementwise_sub(d1, d2)
+                d3 = paddle.subtract(d1, d2)
                 ie.output(d3)

             out = ie()
python/paddle/fluid/tests/unittests/test_imperative_deepcf.py

@@ -76,7 +76,7 @@ class DMF(fluid.Layer):
         for ul, il in zip(self._user_layers, self._item_layers):
             users = ul(users)
             items = il(items)
-        return fluid.layers.elementwise_mul(users, items)
+        return paddle.multiply(users, items)


 class MLP(fluid.Layer):
python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py

@@ -67,7 +67,7 @@ class SimpleNet(fluid.Layer):
         projection = fluid.layers.matmul(
             x_emb, paddle.transpose(self.embedding.weight, perm=[1, 0])
         )
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py

@@ -306,9 +306,7 @@ class SimpleAttention(fluid.dygraph.Layer):
             decoder_state_proj_reshape,
             [-1, encoder_proj.shape[1], -1],
         )
-        concated = fluid.layers.elementwise_add(
-            encoder_proj, decoder_state_expand
-        )
+        concated = paddle.add(encoder_proj, decoder_state_expand)
         concated = paddle.tanh(x=concated)
         attention_weight = self.fc_2(concated)

@@ -362,7 +360,7 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
             )
             fc_1 = self.fc_1_layer(context)
             fc_2 = self.fc_2_layer(current_word)
-            decoder_inputs = fluid.layers.elementwise_add(x=fc_1, y=fc_2)
+            decoder_inputs = paddle.add(x=fc_1, y=fc_2)
             h, _, _ = self.gru_unit(decoder_inputs, hidden_mem)
             hidden_mem = h
python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py

@@ -35,7 +35,7 @@ class MyLayer(fluid.Layer):
     def forward(self, inputs):
         x = fluid.layers.relu(inputs)
-        x = fluid.layers.elementwise_mul(x, x)
+        x = paddle.multiply(x, x)
         x = paddle.sum(x)
         return [x]