未验证 提交 e62ab375 编写于 作者: 傅剑寒 提交者: GitHub

remove pow in nn.py under fluid (#47878)

上级 e16d9a8e
......@@ -19,6 +19,7 @@ import logging
import paddle
import paddle.fluid as fluid
import paddle
from ....log_helper import get_logger
from .utils import (
......@@ -76,7 +77,7 @@ class AdaRoundLoss:
# calculate regularization term - which ensures parameter to converge to exactly zeros and ones
# at the end of optimization
reg_term = fluid.layers.reduce_sum(
-fluid.layers.pow(paddle.abs(2 * h_v - 1), factor=beta) + 1
-paddle.pow(paddle.abs(2 * h_v - 1), beta) + 1
)
# calculate the rounding loss
......
......@@ -118,7 +118,6 @@ __all__ = [
'relu',
'log',
'crop_tensor',
'pow',
'prelu',
'brelu',
'flatten',
......@@ -8032,59 +8031,6 @@ def pad2d(
return out
@templatedoc()
@templatedoc()
def pow(x, factor=1.0, name=None):
    """
    Pow activation operator: computes :math:`out = x^{factor}` element-wise.

    Args:
        x (Variable): Input ``Tensor`` or ``LoDTensor``. Accepted dtypes are
            ``int32``, ``int64``, ``float16``, ``float32`` and ``float64``.
        factor (float32|Variable, optional): The exponent. Either a python
            float, or a ``float32`` ``Tensor`` with shape ``[1]``.
            Default 1.0.
        name (str, optional): Normally there is no need for the user to set
            this property. For more information, please refer to
            :ref:`api_guide_Name`. Default None.

    Returns:
        Variable: A ``Tensor`` or ``LoDTensor`` with the same dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name="x", shape=[32, 32], dtype="float32")

            # example 1: factor given as a python float
            y_1 = fluid.layers.pow(x, factor=2.0)   # y_1 is x^{2.0}

            # example 2: factor given as a Variable
            factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
            y_2 = fluid.layers.pow(x, factor=factor_tensor)  # y_2 is x^{3.0}
    """
    check_variable_and_dtype(
        x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow'
    )
    # LayerHelper is created before the op input/attr dicts so that
    # **locals() captures only (x, factor, name).
    helper = LayerHelper('pow', **locals())
    op_inputs = {'X': x}
    op_attrs = {}
    if isinstance(factor, Variable):
        # A tensor exponent is wired in as an extra op input; it must be
        # float32 and is excluded from gradient computation.
        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
        factor.stop_gradient = True
        op_inputs['FactorTensor'] = factor
    else:
        # A plain python number travels as a static op attribute instead.
        op_attrs['factor'] = factor
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='pow', inputs=op_inputs, outputs={'Out': out}, attrs=op_attrs
    )
    return out
@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
r"""
......
......@@ -60,7 +60,7 @@ def dyfunc_with_if_else2(x, col=100):
if fluid.layers.reduce_mean(x).numpy()[0] > x.numpy()[row][col]:
y = fluid.layers.relu(x)
else:
x_pow = fluid.layers.pow(x, 2)
x_pow = paddle.pow(x, 2)
y = paddle.tanh(x_pow)
return y
......
......@@ -46,7 +46,7 @@ class TestBase(IPUOpTest):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
)
out = paddle.fluid.layers.pow(x, **self.attrs)
out = paddle.pow(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
......@@ -85,7 +85,7 @@ class TestCase1(TestBase):
factor = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
)
out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
out = paddle.pow(x, factor, **self.attrs)
self.fetch_list = [out.name]
......
......@@ -2783,8 +2783,8 @@ class TestPow_factor_tensor(TestActivation):
factor_1 = 2.0
factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
out_1 = fluid.layers.pow(x, factor=factor_1)
out_2 = fluid.layers.pow(x, factor=factor_2)
out_1 = paddle.pow(x, factor_1)
out_2 = paddle.pow(x, factor_2)
out_4 = paddle.pow(x, factor_1, name='pow_res')
out_6 = paddle.pow(x, factor_2)
self.assertEqual(('pow_res' in out_4.name), True)
......@@ -2800,27 +2800,6 @@ class TestPow_factor_tensor(TestActivation):
assert np.allclose(res_2, np.power(input, 3))
assert np.allclose(res_6, np.power(input, 3))
def test_error(self):
    # fluid.layers.pow must raise TypeError whenever the factor is a
    # float64 tensor, regardless of the input tensor's dtype.
    for idx, in_dtype in enumerate(
        ["int32", "int64", "float32", "float64"], start=1
    ):
        data_var = fluid.layers.data(
            name="in%d" % idx,
            shape=[11, 17],
            append_batch_size=False,
            dtype=in_dtype,
        )
        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)
        self.assertRaises(
            TypeError, fluid.layers.pow, x=data_var, factor=factor_1
        )
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
out = scale_b * np.tanh(x * scale_a)
......
......@@ -14,6 +14,7 @@
import unittest
import paddle.fluid as fluid
import paddle
class TestNameScope(unittest.TestCase):
......@@ -26,7 +27,7 @@ class TestNameScope(unittest.TestCase):
with fluid.name_scope("s3"):
d = c / 1
with fluid.name_scope("s1"):
f = fluid.layers.pow(d, 2.0)
f = paddle.pow(d, 2.0)
with fluid.name_scope("s4"):
g = f - 1
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册