diff --git a/python/paddle/fluid/contrib/slim/quantization/adaround.py b/python/paddle/fluid/contrib/slim/quantization/adaround.py
index b253e15918a5d55f79142526bfaec087811fe481..257363baf9f3a59396e05bf765c1c3a9d6513949 100644
--- a/python/paddle/fluid/contrib/slim/quantization/adaround.py
+++ b/python/paddle/fluid/contrib/slim/quantization/adaround.py
@@ -76,7 +76,7 @@ class AdaRoundLoss:
         # calculate regularization term - which ensures parameter to converge to exactly zeros and ones
         # at the end of optimization
         reg_term = fluid.layers.reduce_sum(
-            -fluid.layers.pow(paddle.abs(2 * h_v - 1), factor=beta) + 1
+            -paddle.pow(paddle.abs(2 * h_v - 1), beta) + 1
         )
 
         # calculate the rounding loss
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index f8c741585081aad89eb502002344f02891942b4a..517e86c3f91d7e91ed55ad8dd96ee44367619737 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -118,7 +118,6 @@ __all__ = [
     'relu',
     'log',
     'crop_tensor',
-    'pow',
     'prelu',
     'brelu',
     'flatten',
@@ -8032,59 +8031,6 @@ def pad2d(
     return out
 
 
-@templatedoc()
-def pow(x, factor=1.0, name=None):
-    """
-    This is Pow Activation Operator.
-
-    :math:`out = x^{factor}`
-
-    Args:
-        x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
-        factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
-        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
-
-    Returns:
-        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle.fluid as fluid
-
-            x = fluid.data(name="x", shape=[32,32], dtype="float32")
-
-            # example 1: argument factor is float
-            y_1 = fluid.layers.pow(x, factor=2.0)
-            # y_1 is x^{2.0}
-
-            # example 2: argument factor is Variable
-            factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
-            y_2 = fluid.layers.pow(x, factor=factor_tensor)
-            # y_2 is x^{3.0}
-    """
-    check_variable_and_dtype(
-        x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow'
-    )
-
-    helper = LayerHelper('pow', **locals())
-    inputs = {'X': x}
-    attrs = {}
-    if isinstance(factor, Variable):
-        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
-        factor.stop_gradient = True
-        inputs['FactorTensor'] = factor
-    else:
-        attrs['factor'] = factor
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    return out
-
-
 @deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
 def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     r"""
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
index 8459d0d60e7d368e3f8dbb3ea3dc71add6396cae..3862ab6f9420a4c6b2d5bf263567b18384659e5d 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py
@@ -60,7 +60,7 @@ def dyfunc_with_if_else2(x, col=100):
     if fluid.layers.reduce_mean(x).numpy()[0] > x.numpy()[row][col]:
         y = fluid.layers.relu(x)
     else:
-        x_pow = fluid.layers.pow(x, 2)
+        x_pow = paddle.pow(x, 2)
         y = paddle.tanh(x_pow)
     return y
 
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py
index f61a8b8a24e394901a58849647c2c41d1a4b1841..28b71ab3c885314b1967f8dd6b54411b16762677 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py
@@ -46,7 +46,7 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.pow(x, **self.attrs)
+        out = paddle.pow(x, **self.attrs)
         self.fetch_list = [out.name]
 
     def run_model(self, exec_mode):
@@ -85,7 +85,7 @@ class TestCase1(TestBase):
         factor = paddle.static.data(
             name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
         )
-        out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
+        out = paddle.pow(x, factor, **self.attrs)
         self.fetch_list = [out.name]
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index abfc0527dbde45598962e602ba7af3d6f218ca70..39a866434afd67c830c37020cec89a3aa9fb2913 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -2783,8 +2783,8 @@ class TestPow_factor_tensor(TestActivation):
 
         factor_1 = 2.0
         factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
-        out_1 = fluid.layers.pow(x, factor=factor_1)
-        out_2 = fluid.layers.pow(x, factor=factor_2)
+        out_1 = paddle.pow(x, factor_1)
+        out_2 = paddle.pow(x, factor_2)
         out_4 = paddle.pow(x, factor_1, name='pow_res')
         out_6 = paddle.pow(x, factor_2)
         self.assertEqual(('pow_res' in out_4.name), True)
@@ -2800,27 +2800,6 @@ class TestPow_factor_tensor(TestActivation):
         assert np.allclose(res_2, np.power(input, 3))
         assert np.allclose(res_6, np.power(input, 3))
 
-    def test_error(self):
-        in1 = fluid.layers.data(
-            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
-        )
-        in2 = fluid.layers.data(
-            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
-        )
-        in3 = fluid.layers.data(
-            name="in3", shape=[11, 17], append_batch_size=False, dtype="float32"
-        )
-        in4 = fluid.layers.data(
-            name="in4", shape=[11, 17], append_batch_size=False, dtype="float64"
-        )
-
-        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)
-
-        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
-        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
-        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
-        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)
-
 
 def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
     out = scale_b * np.tanh(x * scale_a)
diff --git a/python/paddle/fluid/tests/unittests/test_name_scope.py b/python/paddle/fluid/tests/unittests/test_name_scope.py
index 475ebec9ecee2b89f866f3c7a73519fa9ba4f43f..eb61e27e740804d2b84a63155390a9dca79cd576 100644
--- a/python/paddle/fluid/tests/unittests/test_name_scope.py
+++ b/python/paddle/fluid/tests/unittests/test_name_scope.py
@@ -14,6 +14,7 @@
 import unittest
 
 import paddle.fluid as fluid
+import paddle
 
 
 class TestNameScope(unittest.TestCase):
@@ -26,7 +27,7 @@ class TestNameScope(unittest.TestCase):
             with fluid.name_scope("s3"):
                 d = c / 1
         with fluid.name_scope("s1"):
-            f = fluid.layers.pow(d, 2.0)
+            f = paddle.pow(d, 2.0)
         with fluid.name_scope("s4"):
             g = f - 1
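
For callers migrating off the removed API, the pattern throughout this diff is mechanical: fluid.layers.pow(x, factor=f) becomes paddle.pow(x, f), where the exponent may be a Python scalar or a Tensor. Below is a minimal dygraph sketch of the two call styles exercised by the updated tests; the variable names and shapes are illustrative, not taken from the PR.

    import numpy as np
    import paddle

    # Illustrative input; the tests above use shapes like [11, 17].
    x = paddle.to_tensor(np.random.uniform(1, 2, [11, 17]).astype("float32"))

    # Scalar exponent: fluid.layers.pow(x, factor=2.0) -> paddle.pow(x, 2.0)
    y_scalar = paddle.pow(x, 2.0)

    # Tensor exponent: the old fill_constant factor tensor is now passed as
    # the second positional argument instead of the `factor` keyword.
    factor = paddle.full([1], 3.0, dtype="float32")
    y_tensor = paddle.pow(x, factor)

    np.testing.assert_allclose(y_scalar.numpy(), np.power(x.numpy(), 2.0), rtol=1e-05)
    np.testing.assert_allclose(y_tensor.numpy(), np.power(x.numpy(), 3.0), rtol=1e-05)

Unlike the removed layer, whose factor had to be a float or a shape-[1] float32 Tensor, paddle.pow broadcasts a Tensor exponent against x, so a shape-[1] factor behaves the same as the old FactorTensor input.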