From e62ab3755672ba5027b9f628164b554b50d5e387 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=82=85=E5=89=91=E5=AF=92?=
Date: Wed, 23 Nov 2022 18:41:40 +0800
Subject: [PATCH] remove pow in nn.py under fluid (#47878)

---
 .../contrib/slim/quantization/adaround.py     |  3 +-
 python/paddle/fluid/layers/nn.py              | 54 -------------------
 .../dygraph_to_static/ifelse_simple_func.py   |  2 +-
 .../tests/unittests/ipu/test_pow_op_ipu.py    |  4 +-
 .../tests/unittests/test_activation_op.py     | 25 +--------
 .../fluid/tests/unittests/test_name_scope.py  |  3 +-
 6 files changed, 9 insertions(+), 82 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/quantization/adaround.py b/python/paddle/fluid/contrib/slim/quantization/adaround.py
index b253e15918a..257363baf9f 100644
--- a/python/paddle/fluid/contrib/slim/quantization/adaround.py
+++ b/python/paddle/fluid/contrib/slim/quantization/adaround.py
@@ -19,6 +19,7 @@ import logging
 
 import paddle.fluid as fluid
+import paddle
 
 from ....log_helper import get_logger
 from .utils import (
@@ -76,7 +77,7 @@ class AdaRoundLoss:
         # calculate regularization term - which ensures parameter to converge to exactly zeros and ones
         # at the end of optimization
         reg_term = fluid.layers.reduce_sum(
-            -fluid.layers.pow(paddle.abs(2 * h_v - 1), factor=beta) + 1
+            -paddle.pow(paddle.abs(2 * h_v - 1), beta) + 1
         )
 
         # calculate the rounding loss
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index f8c74158508..517e86c3f91 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -118,7 +118,6 @@ __all__ = [
     'relu',
     'log',
     'crop_tensor',
-    'pow',
     'prelu',
     'brelu',
     'flatten',
@@ -8032,59 +8031,6 @@ def pad2d(
     return out
 
 
-@templatedoc()
-def pow(x, factor=1.0, name=None):
-    """
-    This is Pow Activation Operator.
-
-    :math:`out = x^{factor}`
-
-    Args:
-        x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
-        factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
-        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
-
-    Returns:
-        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-
-            x = fluid.data(name="x", shape=[32,32], dtype="float32")
-
-            # example 1: argument factor is float
-            y_1 = fluid.layers.pow(x, factor=2.0)
-            # y_1 is x^{2.0}
-
-            # example 2: argument factor is Variable
-            factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
-            y_2 = fluid.layers.pow(x, factor=factor_tensor)
-            # y_2 is x^{3.0}
-    """
-    check_variable_and_dtype(
-        x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow'
-    )
-
-    helper = LayerHelper('pow', **locals())
-    inputs = {'X': x}
-    attrs = {}
-    if isinstance(factor, Variable):
-        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
-        factor.stop_gradient = True
-        inputs['FactorTensor'] = factor
-    else:
-        attrs['factor'] = factor
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     r"""
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
index 8459d0d60e7..3862ab6f942 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
@@ -60,7 +60,7 @@ def dyfunc_with_if_else2(x, col=100):
     if fluid.layers.reduce_mean(x).numpy()[0] > x.numpy()[row][col]:
         y = fluid.layers.relu(x)
     else:
-        x_pow = fluid.layers.pow(x, 2)
+        x_pow = paddle.pow(x, 2)
         y = paddle.tanh(x_pow)
     return y
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py
index f61a8b8a24e..28b71ab3c88 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py
@@ -46,7 +46,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.pow(x, **self.attrs)
+        out = paddle.pow(x, **self.attrs)
         self.fetch_list = [out.name]
 
     def run_model(self, exec_mode):
@@ -85,7 +85,7 @@ class TestCase1(TestBase):
         factor = paddle.static.data(
             name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
         )
-        out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
+        out = paddle.pow(x, factor, **self.attrs)
         self.fetch_list = [out.name]
 
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index abfc0527dbd..39a866434af 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -2783,8 +2783,8 @@ class TestPow_factor_tensor(TestActivation):
         factor_1 = 2.0
         factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
 
-        out_1 = fluid.layers.pow(x, factor=factor_1)
-        out_2 = fluid.layers.pow(x, factor=factor_2)
+        out_1 = paddle.pow(x, factor_1)
+        out_2 = paddle.pow(x, factor_2)
         out_4 = paddle.pow(x, factor_1, name='pow_res')
         out_6 = paddle.pow(x, factor_2)
         self.assertEqual(('pow_res' in out_4.name), True)
@@ -2800,27 +2800,6 @@ class TestPow_factor_tensor(TestActivation):
         assert np.allclose(res_2, np.power(input, 3))
         assert np.allclose(res_6, np.power(input, 3))
 
-    def test_error(self):
-        in1 = fluid.layers.data(
-            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
-        )
-        in2 = fluid.layers.data(
-            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
-        )
-        in3 = fluid.layers.data(
-            name="in3", shape=[11, 17], append_batch_size=False, dtype="float32"
-        )
-        in4 = fluid.layers.data(
-            name="in4", shape=[11, 17], append_batch_size=False, dtype="float64"
-        )
-
-        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)
-
-        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
-        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
-        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
-        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)
-
 
 def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
     out = scale_b * np.tanh(x * scale_a)
diff --git a/python/paddle/fluid/tests/unittests/test_name_scope.py b/python/paddle/fluid/tests/unittests/test_name_scope.py
index 475ebec9ece..eb61e27e740 100644
--- a/python/paddle/fluid/tests/unittests/test_name_scope.py
+++ b/python/paddle/fluid/tests/unittests/test_name_scope.py
@@ -14,6 +14,7 @@
 
 import unittest
 import paddle.fluid as fluid
+import paddle
 
 
 class TestNameScope(unittest.TestCase):
@@ -26,7 +27,7 @@ class TestNameScope(unittest.TestCase):
                 with fluid.name_scope("s3"):
                     d = c / 1
         with fluid.name_scope("s1"):
-            f = fluid.layers.pow(d, 2.0)
+            f = paddle.pow(d, 2.0)
        with fluid.name_scope("s4"):
             g = f - 1
-- 
GitLab
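
Migration note for downstream code: the removed ``fluid.layers.pow`` took its exponent as the ``factor`` keyword (a Python float or a shape-[1] float32 Tensor), while the replacement ``paddle.pow(x, y)`` takes it as the positional argument ``y``, which may be a scalar or a Tensor. Below is a minimal sketch of the migration, assuming Paddle 2.x in dynamic-graph mode; the tensor values are illustrative and not taken from this patch.

.. code-block:: python

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])

    # before: y_1 = fluid.layers.pow(x, factor=2.0)
    y_1 = paddle.pow(x, 2.0)  # scalar exponent -> [1., 4., 9.]

    # before: y_2 = fluid.layers.pow(x, factor=factor_tensor)
    factor = paddle.to_tensor(3.0)
    y_2 = paddle.pow(x, factor)  # Tensor exponent -> [1., 8., 27.]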