Unverified commit a5ce60b8, authored by: H HongyuJia, committed by: GitHub

clean elementwise doc (#49004)

Parent commit: 11c43b0b
......@@ -41,7 +41,7 @@ def sum(input, scope=None, util=None):
input = fluid.layers.cast(some_input, dtype='float32')
cnt = paddle.sum(input)
global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_add(cnt, global_cnt)
tmp = paddle.add(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
# in train.py, after train or infer
......@@ -121,7 +121,7 @@ def min(input, scope=None, util=None):
input = fluid.layers.cast(some_input, dtype='float32')
cnt = paddle.sum(input)
global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_min(cnt, global_cnt)
tmp = paddle.minimum(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
# in train.py, after train or infer
......@@ -161,7 +161,7 @@ def auc(stat_pos, stat_neg, scope=None, util=None):
# in model.py
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(output, min=-15.0, max=15.0))
binary_predict = fluid.layers.concat(
input=[fluid.layers.elementwise_sub(fluid.layers.ceil(similarity_norm), similarity_norm), similarity_norm], axis=1)
input=[paddle.subtract(fluid.layers.ceil(similarity_norm), similarity_norm), similarity_norm], axis=1)
self.auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg] =
paddle.static.auc(input=binary_predict, label=label, curve='ROC', num_thresholds=4096)
......@@ -396,11 +396,11 @@ def acc(correct, total, scope=None, util=None):
acc = fluid.layers.acc(predict, label, k=1, correct=correct, total=total)
global_correct = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp1 = fluid.layers.elementwise_min(correct, global_correct)
tmp1 = paddle.minimum(correct, global_correct)
fluid.layers.assign(tmp1, global_correct)
global_total = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp2 = fluid.layers.elementwise_min(total, global_total)
tmp2 = paddle.minimum(total, global_total)
fluid.layers.assign(tmp2, global_total)
# in train.py, after train or infer
......
......@@ -485,7 +485,7 @@ class DataFeeder:
x = fluid.data(name='x', shape=[None, 2, 2])
y = fluid.data(name='y', shape=[None, 2, 2], dtype='float32')
z = fluid.layers.elementwise_add(x, y)
z = paddle.add(x, y)
feeder = fluid.DataFeeder(['x','y'], fluid.CPUPlace())
place_num = 2
......
......@@ -188,7 +188,7 @@ class FleetUtil:
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
paddle.subtract(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
......@@ -1377,7 +1377,7 @@ class FleetUtil:
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
paddle.subtract(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
......@@ -1577,7 +1577,7 @@ class FleetUtil:
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
paddle.subtract(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
......
......@@ -359,7 +359,7 @@ def save_vars(
w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b')
hidden_w = paddle.matmul(x=data, y=w)
hidden_b = fluid.layers.elementwise_add(hidden_w, b)
hidden_b = paddle.add(hidden_w, b)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
......@@ -834,7 +834,7 @@ def load_vars(
w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b')
hidden_w = paddle.matmul(x=data, y=w)
hidden_b = fluid.layers.elementwise_add(hidden_w, b)
hidden_b = paddle.add(hidden_w, b)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
......@@ -1602,7 +1602,7 @@ def load_inference_model(
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=data, y=w)
hidden_b = fluid.layers.elementwise_add(hidden_w, b)
hidden_b = paddle.add(hidden_w, b)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
......
......@@ -1088,10 +1088,10 @@ class While:
cond = paddle.less_than(x=i, y=loop_len)
while_op = fluid.layers.While(cond=cond)
with while_op.block():
sums_tensor = fluid.layers.elementwise_add(x=data, y=data)
sums_tensor = paddle.add(x=data, y=data)
fluid.layers.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign
i = paddle.increment(x=i, value=1)
data = fluid.layers.elementwise_add(x=data, y=one)
data = paddle.add(x=data, y=one)
paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
feed_data = np.ones(1).astype('float32')
......
......@@ -47,7 +47,7 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
x.stop_gradient = False
y = fluid.layers.fill_constant(
shape=[100, 10], dtype='float32', value=1)
z = fluid.layers.elementwise_add(x=x, y=y)
z = paddle.add(x=x, y=y)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
init_array = paddle.tensor.array_write(x=z, i=i)
array = fluid.layers.assign(init_array)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register