Unverified commit 74e3f26f, authored by HongyuJia, committed by GitHub

[Clean fluid] Clean fluid elementwise_min/pow/mod/floordiv, remove API (#48040)

* clean fluid elementwise_pow, remove API

* clean elem_pow doc

* clean elementwise_mod

* clean elementwise min, floordiv, mod
Parent 099c2302
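The mapping this commit applies, as a runnable 2.x sketch (paddle.minimum for the removed elementwise_min is an inferred equivalent from the 2.x API family; the other three replacements appear verbatim in the diff below):

    import paddle
    x = paddle.to_tensor([10, 15, 8])
    y = paddle.to_tensor([3, 6, 5])
    paddle.pow(x, y)           # replaces fluid.layers.elementwise_pow
    paddle.remainder(x, y)     # replaces fluid.layers.elementwise_mod
    paddle.floor_divide(x, y)  # replaces fluid.layers.elementwise_floordiv
    paddle.minimum(x, y)       # replaces fluid.layers.elementwise_min (inferred)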
@@ -672,7 +672,7 @@ class MultivariateNormalDiag(Distribution):
one_diag = tensor.diag(
tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype)
)
-        inv_diag = nn.elementwise_pow(value, (one_all - 2 * one_diag))
+        inv_diag = paddle.pow(value, (one_all - 2 * one_diag))
return inv_diag
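The exponent trick above inverts just the diagonal: one_all - 2 * one_diag is -1 on the diagonal and 1 off it, so raising a diagonal scale matrix to that power reciprocates the diagonal entries while leaving the zeros untouched. A minimal numeric sketch (tensor values hypothetical):

    import paddle
    value = paddle.to_tensor([[2.0, 0.0], [0.0, 4.0]])       # diagonal scale matrix
    exponent = paddle.to_tensor([[-1.0, 1.0], [1.0, -1.0]])  # one_all - 2 * one_diag
    print(paddle.pow(value, exponent))  # [[0.5, 0.0], [0.0, 0.25]]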
@@ -58,7 +58,6 @@ from ..data_feeder import (
check_type,
check_dtype,
)
-import paddle
from paddle.utils import deprecated
from paddle import _C_ops, _legacy_C_ops
@@ -151,10 +150,6 @@ __all__ = [
'elementwise_div',
'elementwise_sub',
'elementwise_mul',
-    'elementwise_min',
-    'elementwise_pow',
-    'elementwise_mod',
-    'elementwise_floordiv',
'uniform_random_batch_size_like',
'gaussian_random',
'sampling_id',
@@ -12369,187 +12364,11 @@ def elementwise_mul(x, y, axis=-1, act=None, name=None):
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
-def elementwise_min(x, y, axis=-1, act=None, name=None):
-    """
-    :alias_main: paddle.elementwise_min
-    :alias: paddle.elementwise_min, paddle.tensor.elementwise_min, paddle.tensor.math.elementwise_min
-    :old_api: paddle.fluid.layers.elementwise_min
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            import paddle
-
-            def gen_data():
-                return {
-                    "x": np.array([2, 3, 4]).astype('float32'),
-                    "y": np.array([1, 5, 2]).astype('float32')
-                }
-
-            paddle.enable_static()
-            x = fluid.data(name="x", shape=[3], dtype='float32')
-            y = fluid.data(name="y", shape=[3], dtype='float32')
-            z = fluid.layers.elementwise_min(x, y)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
-            print(z_value)  # [1, 3, 2]
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            import paddle
-
-            def gen_data():
-                return {
-                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
-                    "y": np.zeros((3, 4)).astype('float32')
-                }
-
-            paddle.enable_static()
-            x = fluid.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
-            y = fluid.data(name="y", shape=[3, 4], dtype='float32')
-            z = fluid.layers.elementwise_min(x, y, axis=1)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
-            print(z_value)  # [[[[0., 0., 0., 0., 0.] ... [0., 0., 0., 0., 0.]]]]
-    """
-    if _non_static_mode():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name='elementwise_min'
-        )
-    return _elementwise_op(LayerHelper('elementwise_min', **locals()))
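This commit shows no in-tree replacement call site for elementwise_min; in 2.x code the natural equivalent is paddle.minimum (an inferred mapping, not confirmed by the diff). The first removed docstring example, ported:

    import paddle
    x = paddle.to_tensor([2.0, 3.0, 4.0])
    y = paddle.to_tensor([1.0, 5.0, 2.0])
    print(paddle.minimum(x, y))  # [1., 3., 2.]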
-def elementwise_pow(x, y, axis=-1, act=None, name=None):
-    """
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-            import numpy as np
-            import paddle
-
-            def gen_data():
-                return {
-                    "x": np.array([2, 3, 4]).astype('float32'),
-                    "y": np.array([1, 5, 2]).astype('float32')
-                }
-
-            paddle.enable_static()
-            x = fluid.data(name="x", shape=[3], dtype='float32')
-            y = fluid.data(name="y", shape=[3], dtype='float32')
-            z = fluid.layers.elementwise_pow(x, y)
-
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
-            print(z_value)  # [2, 243, 16]
-    """
-    if _non_static_mode():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name='elementwise_pow'
-        )
-    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
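paddle.pow is the replacement used at every call site in this diff; the removed docstring example, ported to 2.x dygraph:

    import paddle
    x = paddle.to_tensor([2.0, 3.0, 4.0])
    y = paddle.to_tensor([1.0, 5.0, 2.0])
    print(paddle.pow(x, y))  # [2., 243., 16.]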
@deprecated(since="2.0.0", update_to="paddle.remainder")
def elementwise_mod(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 6, 5]).astype('int32')
}
paddle.enable_static()
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_mod(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[1, 3, 3]
"""
if _non_static_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_mod'
)
return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
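The @deprecated decorator already named paddle.remainder as the successor; the removed example in 2.x form:

    import paddle
    x = paddle.to_tensor([10, 15, 8], dtype='int32')
    y = paddle.to_tensor([3, 6, 5], dtype='int32')
    print(paddle.remainder(x, y))  # [1, 3, 3]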
@deprecated(since="2.0.0", update_to="paddle.floor_divide")
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
"""
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
def gen_data():
return {
"x": np.array([10, 15, 8]).astype('int32'),
"y": np.array([3, 7, 5]).astype('int32')
}
paddle.enable_static()
x = fluid.data(name="x", shape=[3], dtype='int32')
y = fluid.data(name="y", shape=[3], dtype='int32')
z = fluid.layers.elementwise_floordiv(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) #[3, 2, 1]
"""
if _non_static_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_floordiv'
)
return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
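Likewise paddle.floor_divide, per the decorator; the removed example in 2.x form:

    import paddle
    x = paddle.to_tensor([10, 15, 8], dtype='int32')
    y = paddle.to_tensor([3, 7, 5], dtype='int32')
    print(paddle.floor_divide(x, y))  # [3, 2, 1]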
for func in [
    elementwise_add,
    elementwise_div,
    elementwise_sub,
    elementwise_mul,
-    elementwise_pow,
-    elementwise_min,
-    elementwise_mod,
-    elementwise_floordiv,
]:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
@@ -1317,10 +1317,8 @@ class BeamSearchDecoder(Decoder):
scores = nn.reshape(scores, [-1, self.beam_size * self.vocab_size])
# TODO: add grad for topk then this beam search can be used to train
topk_scores, topk_indices = paddle.topk(x=scores, k=self.beam_size)
-        beam_indices = nn.elementwise_floordiv(
-            topk_indices, self.vocab_size_tensor
-        )
-        token_indices = nn.elementwise_mod(topk_indices, self.vocab_size_tensor)
+        beam_indices = paddle.floor_divide(topk_indices, self.vocab_size_tensor)
+        token_indices = paddle.remainder(topk_indices, self.vocab_size_tensor)
next_log_probs = self._gather(
nn.reshape(log_probs, [-1, self.beam_size * self.vocab_size]),
topk_indices,
@@ -4802,7 +4802,7 @@ class ExponentialMovingAverage:
)
global_step = layers.cast(global_step, "float32")
decay_var = block._clone_variable(self._decay_var)
-        decay_pow_acc = layers.elementwise_pow(decay_var, global_step)
+        decay_pow_acc = paddle.pow(decay_var, global_step)
return decay_pow_acc, global_step
def _create_ema_vars(self, param):
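decay_pow_acc accumulates decay ** global_step, which ExponentialMovingAverage presumably uses for the standard EMA bias correction. A quick check of the new call (values hypothetical):

    import paddle
    decay = paddle.to_tensor(0.999)
    step = paddle.to_tensor(100.0)
    print(paddle.pow(decay, step))  # ~0.9048, i.e. 0.999 ** 100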
@@ -7756,7 +7756,7 @@ class LookaheadOptimizer:
shape=[1], dtype='float32', value=1.0
)
-        mod = layers.elementwise_mod(step, k)
+        mod = paddle.remainder(step, k)
with layers.control_flow.Switch() as switch:
with switch.case(step == one_var):
for param_name in params:
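The remainder feeds the Switch above: Lookahead synchronizes its slow weights whenever step % k == 0, and paddle.remainder reproduces the old helper's behavior. A sketch of the condition:

    import paddle
    step = paddle.to_tensor([15.0])
    k = paddle.to_tensor([5.0])
    print(paddle.remainder(step, k) == 0)  # [True] -> sync slow weights this step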
@@ -459,12 +459,8 @@ class BaseModel(fluid.dygraph.Layer):
input=scores, k=self.beam_size
)
-        beam_indices = fluid.layers.elementwise_floordiv(
-            topk_indices, vocab_size_tensor
-        )
-        token_indices = fluid.layers.elementwise_mod(
-            topk_indices, vocab_size_tensor
-        )
+        beam_indices = paddle.floor_divide(topk_indices, vocab_size_tensor)
+        token_indices = paddle.remainder(topk_indices, vocab_size_tensor)
next_log_probs = self._gather(scores, topk_indices, batch_pos)
x = 0
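All three beam-search call sites (BeamSearchDecoder above, BaseModel here, Transformer below) use the same decomposition: topk runs over scores flattened to beam_size * vocab_size, so each flat index splits into a beam id via floor_divide and a token id via remainder. With a hypothetical vocab_size of 5:

    import paddle
    vocab_size = paddle.to_tensor([5])
    flat_index = paddle.to_tensor([13])  # index into the beam x vocab grid
    print(paddle.floor_divide(flat_index, vocab_size))  # [2] -> beam 2
    print(paddle.remainder(flat_index, vocab_size))     # [3] -> token 3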
@@ -856,12 +856,8 @@ class Transformer(Layer):
topk_scores, topk_indices = fluid.layers.topk(
input=scores, k=beam_size
)
-        beam_indices = fluid.layers.elementwise_floordiv(
-            topk_indices, vocab_size_tensor
-        )
-        token_indices = fluid.layers.elementwise_mod(
-            topk_indices, vocab_size_tensor
-        )
+        beam_indices = paddle.floor_divide(topk_indices, vocab_size_tensor)
+        token_indices = paddle.remainder(topk_indices, vocab_size_tensor)
# update states
caches = map_structure(
@@ -150,7 +150,7 @@ class TestMax(TestMul):
class TestPow(TestMul):
def set_test_op(self):
-        self.op = paddle.fluid.layers.elementwise_pow
+        self.op = paddle.pow
class TestMod(TestMul):
@@ -161,7 +161,7 @@ class TestMod(TestMul):
self.rtol_fp16 = 1e-3
def set_test_op(self):
-        self.op = paddle.fluid.layers.elementwise_mod
+        self.op = paddle.remainder
if __name__ == "__main__":
@@ -379,7 +379,7 @@ class TestCondNestedControlFlow(unittest.TestCase):
lambda: fluid.layers.cond(
a == b,
lambda: fluid.layers.elementwise_sub(a, b),
-                lambda: fluid.layers.elementwise_pow(a, b),
+                lambda: paddle.pow(a, b),
),
)
append_backward(out)
@@ -614,7 +614,7 @@ class TestLayer(LayerTest):
t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')
ret = layers.elementwise_add(t, t2)
-            ret = layers.elementwise_pow(ret, t3)
+            ret = paddle.pow(ret, t3)
ret = layers.elementwise_div(ret, t4)
ret = layers.elementwise_sub(ret, t5)
ret = layers.elementwise_mul(ret, t6)
@@ -627,14 +627,14 @@
with self.dynamic_graph():
with _test_eager_guard():
ret = layers.elementwise_add(to_variable(n), to_variable(n2))
-                ret = layers.elementwise_pow(ret, to_variable(n3))
+                ret = paddle.pow(ret, to_variable(n3))
ret = layers.elementwise_div(ret, to_variable(n4))
ret = layers.elementwise_sub(ret, to_variable(n5))
dy_eager_ret = layers.elementwise_mul(ret, to_variable(n6))
dy_eager_ret_value = dy_eager_ret.numpy()
ret = layers.elementwise_add(to_variable(n), to_variable(n2))
-            ret = layers.elementwise_pow(ret, to_variable(n3))
+            ret = paddle.pow(ret, to_variable(n3))
ret = layers.elementwise_div(ret, to_variable(n4))
ret = layers.elementwise_sub(ret, to_variable(n5))
dy_ret = layers.elementwise_mul(ret, to_variable(n6))
@@ -97,7 +97,7 @@ def static(
id = fluid.data('id', [1], 'int32')
two = layers.fill_constant([1], 'int32', 2)
-    mod_two = layers.elementwise_mod(id, two) == 0
+    mod_two = paddle.remainder(id, two) == 0
if loss_in_switch:
avg_loss = layers.case(