diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a6402a2852c2af2cbd9beb35ab28736fc361b389..19b431dce0c37b7872cce7e42a040135605065fa 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -8670,10 +8670,6 @@ def random_crop(x, shape, seed=None):
 
 def log(x, name=None):
     """
-    :alias_main: paddle.log
-    :alias: paddle.log,paddle.tensor.log,paddle.tensor.math.log
-    :old_api: paddle.fluid.layers.log
-
     Calculates the natural log of the given input tensor, element-wise.
 
     .. math::
@@ -8681,31 +8677,23 @@
         Out = \\ln(x)
 
     Args:
-        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
+        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
         name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
+        Tensor: The natural log of the input Tensor computed element-wise.
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            import numpy as np
-
-            # Graph Organizing
-            x = fluid.layers.data(name="x", shape=[1], dtype="float32")
-            res = fluid.layers.log(x)
-
-            # Create an executor using CPU as an example
-            exe = fluid.Executor(fluid.CPUPlace())
+            import paddle
 
-            # Execute
-            x_i = np.array([[1], [2]]).astype(np.float32)
-            res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
-            print(res_val) # [[0.], [0.6931472]]
+            x = [[2,3,4], [7,8,9]]
+            x = paddle.to_tensor(x, dtype='float32')
+            res = paddle.log(x)
+            # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
     """
     if in_dygraph_mode():
         return core.ops.log(x)
@@ -8846,33 +8834,36 @@ def mean_iou(input, label, num_classes):
     Parameters:
-        input (Variable): A n-D Tensor of prediction results for semantic labels with type int32 or int64.
-        label (Variable): A Tensor of ground truth labels with type int32 or int64.
+        input (Tensor): A n-D Tensor of prediction results for semantic labels with type int32 or int64.
+        label (Tensor): A Tensor of ground truth labels with type int32 or int64.
                           Its shape should be the same as input.
         num_classes (int32): The possible number of labels.
 
     Returns:
-        Three Variables.
+        Three Tensors.
 
-        - mean_iou(Variable) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
+        - mean_iou(Tensor) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
                             Data type is float32.
-        - out_wrong(Variable) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
+        - out_wrong(Tensor) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
                              The wrong numbers of each class.
-        - out_correct(Variable): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.
+        - out_correct(Tensor): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.
 
     Examples:
 
        .. code-block:: python
 
-            import paddle.fluid as fluid
-            iou_shape = [None, 32, 32]
+            import paddle
+
+            iou_shape = [64, 32, 32]
             num_classes = 5
-            predict = fluid.data(name='predict', shape=iou_shape, dtype='int64')
-            label = fluid.data(name='label', shape=iou_shape, dtype='int64')
-            mean_iou, out_wrong, out_correct = fluid.layers.mean_iou(predict, label,
-                                                                     num_classes)
+            predict = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64')
+            label = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64')
+            mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes)
     """
+    if in_dygraph_mode():
+        return core.ops.mean_iou(input, label, 'num_classes', num_classes)
+
     helper = LayerHelper('mean_iou', **locals())
     check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
                              'mean_iou')
@@ -11387,10 +11378,6 @@ def _elementwise_op(helper):
 
 def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
-    :alias_main: paddle.scale
-    :alias: paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale
-    :old_api: paddle.fluid.layers.scale
-
     Scale operator.
 
     Putting scale and bias to the input Tensor as following:
@@ -11406,52 +11393,33 @@
                             Out=scale*(X+bias)
 
     Args:
-        x(Variable): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
-        scale(float|Variable): The scale factor of the input, it should be a float number or a Variable with shape [1] and data type as float32.
+        x(Tensor): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
+        scale(float|Tensor): The scale factor of the input, it should be a float number or a Tensor with shape [1] and data type as float32.
         bias(float): The bias to be put on the input.
         bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
         act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
         name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable(Tensor|LoDTensor): Output tensor of scale operator, with shape and data type same as input.
+        Tensor: Output tensor of scale operator, with shape and data type same as input.
 
     Examples:
 
        .. code-block:: python
+
+            # scale as a float32 number
+            import paddle
 
-            import paddle.fluid as fluid
-            import numpy as np
-
-            inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
-            output = fluid.layers.scale(inputs, scale = 2.0, bias = 1.0)
-
-            exe = fluid.Executor(fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-
-            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
-
-            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
-            print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)]
+            data = paddle.randn(shape=[2,3], dtype='float32')
+            res = paddle.scale(data, scale=2.0, bias=1.0)
 
       .. code-block:: python
 
-            # scale with parameter scale as Variable
-            import paddle.fluid as fluid
-            import numpy as np
-
-            inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
-            scale = fluid.layers.data(name="scale", shape=[1], dtype='float32',
-                                      append_batch_size=False)
-            output = fluid.layers.scale(inputs, scale = scale, bias = 1.0)
-
-            exe = fluid.Executor(fluid.CPUPlace())
-            exe.run(fluid.default_startup_program())
-
-            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
-            scale_np = np.array([2.]).astype(np.float32)
+            # scale with parameter scale as a Tensor
+            import paddle
 
-            res = exe.run(fluid.default_main_program(), feed={'x':img, 'scale':scale_np}, fetch_list=[output])
-            print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)]
+            data = paddle.randn(shape=[2, 3], dtype='float32')
+            factor = paddle.to_tensor([2], dtype='float32')
+            res = paddle.scale(data, scale=factor, bias=1.0)
     """
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 6cdc617a0dc17ae9f0893083285c404ca73712f7..de0fbb16f6241209dfd755a71aab2c101252d17a 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -190,11 +190,9 @@
 Examples:
     .. code-block:: python
 
        import paddle
-       paddle.disable_static()
 
        x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
        out = paddle.rsqrt(x)
-       print(out.numpy())
        # [3.16227766 2.23606798 1.82574186 1.58113883]
 """)
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index c633f7022d75e1c352f7ae1f51dc324064359e31..6323fe2e4f202c86be7d680c9993e6c91b016460 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -1237,26 +1237,26 @@ def load_combine(out, file_path):
 
 def has_inf(x):
     """
-    :alias_main: paddle.has_inf
-    :alias: paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf
-    :old_api: paddle.fluid.layers.has_inf
-
     Test if any of x contains an infinity number
 
     Args:
-       x (Variable): The Tensor/LoDTensor to be checked.
+       x (Tensor): The Tensor to be checked.
 
     Returns:
-       Variable: The tensor variable storing the output, only a bool value, indicating that whether there is infinity number in x or not.
+       Tensor: The tensor storing the output, only a bool value, indicating that whether there is infinity number in x or not.
 
     Examples:
        .. code-block:: python
 
-          import paddle.fluid as fluid
-          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
-          res = fluid.layers.has_inf(data)
+          import paddle
+          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
+          res = paddle.has_inf(data)
+          # [False]
     """
+    if in_dygraph_mode():
+        return core.ops.isinf(x)
+
     check_type(x, 'x', (Variable), 'has_inf')
     helper = LayerHelper("isinf", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1266,26 +1266,26 @@
 def has_nan(x):
     """
-    :alias_main: paddle.has_nan
-    :alias: paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan
-    :old_api: paddle.fluid.layers.has_nan
-
     Test if any of x contains a NAN
 
     Args:
-       x (Variable): The Tensor/LoDTensor to be checked.
+       x (Tensor): The Tensor to be checked.
 
     Returns:
-       Variable: The tensor variable storing the output, only a bool value, indicating that whether there is NAN in x or not.
+       Tensor: The tensor variable storing the output, only a bool value, indicating that whether there is NAN in x or not.
 
     Examples:
       .. code-block:: python
 
-          import paddle.fluid as fluid
-          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
-          res = fluid.layers.has_nan(data)
+          import paddle
+          data = paddle.randn(shape=[2,3], dtype="float32")
+          res = paddle.has_nan(data)
+          # [False]
     """
+    if in_dygraph_mode():
+        return core.ops.isnan(x)
+
     check_type(x, 'x', (Variable), 'has_nan')
     helper = LayerHelper("isnan", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
index 4c7add3f271a2ce8dfec9dea0bac8fad4dd7ca41..743bdbc5a42a8a44bab71e0bdd0567afdf3fffe3 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
@@ -14,6 +14,7 @@
 
 import unittest
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
@@ -132,6 +133,14 @@ class BadInputTest(unittest.TestCase):
 
         self.assertRaises(TypeError, test_has_nan_bad_x)
 
+        with fluid.dygraph.guard():
+            data = paddle.zeros([2, 3])
+            result = paddle.has_inf(data)
+            expect_value = np.array([False])
+            self.assertEqual((result.numpy() == expect_value).all(), True)
+            result = paddle.has_nan(data)
+            self.assertEqual((result.numpy() == expect_value).all(), True)
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 138841fcf074ba29b66d48560c20963286f1a140..2e71ed26a89a20e8d76ce1a9a3abb76cb804998a 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1308,33 +1308,25 @@ def min(x, axis=None, keepdim=False, name=None):
 
 def log1p(x, name=None):
     """
-    :alias_main: paddle.log1p
-    :alias: paddle.log1p,paddle.tensor.log1p,paddle.tensor.math.log1p
-
     Calculates the natural log of the given input tensor, element-wise.
 
     .. math::
         Out = \\ln(x+1)
+
     Args:
-        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
+        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
         name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
+        Tensor, the natural log of the input Tensor computed element-wise.
 
     Examples:
 
        .. code-block:: python
+            import paddle
-            import paddle.fluid as fluid
-            import numpy as np
-            # Graph Organizing
-            x = fluid.data(name="x", shape=[2,1], dtype="float32")
-            res = paddle.log1p(x)
-            # Create an executor using CPU as an example
-            exe = fluid.Executor(fluid.CPUPlace())
-            # Execute
-            x_i = np.array([[0], [1]]).astype(np.float32)
-            res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
-            print(res_val) # [[0.], [0.6931472]]
+
+            data = paddle.to_tensor([[0], [1]], dtype='float32')
+            res = paddle.log1p(data)
+            # [[0.], [0.6931472]]
     """
     if in_dygraph_mode():
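
For reference, the snippet below is a minimal dygraph sketch that exercises the 2.0-style calls the updated example code switches to. It assumes a paddle 2.x build where `paddle.log`, `paddle.log1p`, `paddle.scale`, `paddle.rsqrt`, `paddle.has_inf`, `paddle.has_nan`, and `paddle.metric.mean_iou` are exported under these names, as the docstring examples above suggest; the comments describe expected results rather than output captured from a run.

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1., 2.], [3., 4.]])

    # element-wise natural log and log(1 + x)
    log_x = paddle.log(x)
    log1p_x = paddle.log1p(x)

    # scale: Out = scale * X + bias when bias_after_scale is True (the default)
    scaled = paddle.scale(x, scale=2.0, bias=1.0)

    # rsqrt: element-wise 1 / sqrt(x)
    r = paddle.rsqrt(paddle.to_tensor([0.1, 0.2, 0.3, 0.4]))

    # has_inf / has_nan each return a single bool Tensor, [False] here
    print(paddle.has_inf(x), paddle.has_nan(x))

    # mean_iou over integer label maps with 5 classes
    pred = paddle.randint(low=0, high=5, shape=[8, 32, 32], dtype='int64')
    label = paddle.randint(low=0, high=5, shape=[8, 32, 32], dtype='int64')
    miou, out_wrong, out_correct = paddle.metric.mean_iou(pred, label, 5)
    print(miou)  # 1-D float32 Tensor of shape [1]

The `if in_dygraph_mode()` branches added in this patch are what let `has_inf`, `has_nan`, and `mean_iou` take this eager path without building a static program, which is exactly what the new block in the unittest checks under `fluid.dygraph.guard()`.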