Unverified commit 2d0933c3, authored by Steffy-zxf, committed by GitHub

Add two new ops paddle.tensor.mul(), paddle.tensor.pow() (#23485)

* add new op paddle.tensor.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, out=None, name=None)
* add new op paddle.tensor.pow(input, exponent, out=None, name=None)
* add unit tests for the above two new ops (testing the api params out and name)
Parent c4d03052
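
For context, a minimal sketch of how the two new ops are meant to be called from a static-graph program (the variable names and shapes below are illustrative, not part of this change):

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program()):
        x = fluid.data(name="x", shape=[2, 3], dtype="float32")
        y = fluid.data(name="y", shape=[3, 2], dtype="float32")
        squared = paddle.pow(x, 2.0)   # elementwise x^2
        product = paddle.mul(x, y)     # 2-D matrix product, shape [2, 2]

        exe = fluid.Executor(fluid.CPUPlace())
        sq_np, prod_np = exe.run(
            feed={"x": np.ones((2, 3), dtype="float32"),
                  "y": np.ones((3, 2), dtype="float32")},
            fetch_list=[squared, product])
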
@@ -110,9 +110,9 @@ from .tensor.math import atan  #DEFINE_ALIAS
 # from .tensor.math import floor        #DEFINE_ALIAS
 # from .tensor.math import increment        #DEFINE_ALIAS
 # from .tensor.math import log        #DEFINE_ALIAS
-# from .tensor.math import mul        #DEFINE_ALIAS
+from .tensor.math import mul        #DEFINE_ALIAS
 # from .tensor.math import multiplex        #DEFINE_ALIAS
-# from .tensor.math import pow        #DEFINE_ALIAS
+from .tensor.math import pow        #DEFINE_ALIAS
 # from .tensor.math import reciprocal        #DEFINE_ALIAS
 # from .tensor.math import reduce_max        #DEFINE_ALIAS
 # from .tensor.math import reduce_min        #DEFINE_ALIAS
......
@@ -717,24 +717,38 @@ class TestPow_factor_tensor(TestActivation):
         self.check_grad(['X'], 'Out')

     def test_api(self):
+        import paddle
         import paddle.fluid as fluid
         input = np.random.uniform(1, 2, [11, 17]).astype("float32")
         x = fluid.layers.data(
             name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
+        res = fluid.layers.data(
+            name="res",
+            shape=[11, 17],
+            append_batch_size=False,
+            dtype="float32")

         factor_1 = 2.0
         factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
         out_1 = fluid.layers.pow(x, factor=factor_1)
         out_2 = fluid.layers.pow(x, factor=factor_2)
+        out_3 = paddle.pow(x, factor_1, out=res)
+        out_4 = paddle.pow(x, factor_1, name='pow_res')
+        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
+        out_6 = paddle.pow(x, factor_2)
+        self.assertEqual(('pow_res' in out_4.name), True)

         exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2 = exe.run(fluid.default_main_program(),
-                               feed={"x": input},
-                               fetch_list=[out_1, out_2])
+        res_1, res_2, res_3, res, res_6 = exe.run(
+            fluid.default_main_program(),
+            feed={"x": input},
+            fetch_list=[out_1, out_2, out_3, res, out_6])

         assert np.array_equal(res_1, np.power(input, 2))
         assert np.array_equal(res_2, np.power(input, 3))
+        assert np.array_equal(res_3, res)
+        assert np.array_equal(res_6, np.power(input, 3))


 class TestSTanh(TestActivation):
......
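
The out_4/out_5 cases above exercise the out/name precedence rule; a small sketch of the intended semantics (assuming a build with this patch installed, and reusing the shapes from the test):

    import warnings
    import paddle
    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program()):
        x = fluid.data(name="x", shape=[11, 17], dtype="float32")
        res = fluid.data(name="res", shape=[11, 17], dtype="float32")

        only_name = paddle.pow(x, 2.0, name="pow_res")
        assert "pow_res" in only_name.name

        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            both = paddle.pow(x, 2.0, out=res, name="pow_res")
        assert both is res  # out wins over name
        assert any(issubclass(w.category, UserWarning) for w in caught)
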
@@ -16,6 +16,7 @@ from __future__ import print_function

 import unittest
 import numpy as np
+import paddle
 import paddle.fluid.core as core
 from op_test import OpTest
 import paddle.fluid as fluid
@@ -174,5 +175,35 @@ class TestFP16MulOp2(TestMulOp2):
             no_grad_set=set('Y'))


+class TestMulOpAttr(unittest.TestCase):
+    def test_out(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
+            y = fluid.data(name='y', shape=[3, 2], dtype='float32')
+            res = fluid.data(name="output", shape=[2, 2], dtype="float32")
+            y_1 = paddle.mul(x, y, out=res)
+
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            data1 = np.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
+            data2 = np.array([[1, 2], [1, 2], [1, 2]], dtype='float32')
+            np_res, np_y_1 = exe.run(feed={'x': data1,
+                                           'y': data2},
+                                     fetch_list=[res, y_1])
+
+            self.assertEqual((np_res == np_y_1).all(), True)
+
+    def test_name(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
+            y = fluid.data(name='y', shape=[3, 2], dtype='float32')
+            res = fluid.data(name="output", shape=[2, 2], dtype="float32")
+            y_1 = paddle.mul(x, y, name='mul_res')
+            y_2 = paddle.mul(x, y, out=res, name='mul_res')
+            self.assertEqual(('mul_res' in y_1.name), True)
+
+
 if __name__ == "__main__":
     unittest.main()
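
For reference, the value test_out checks can be reproduced by hand; with the default x_num_col_dims=1 and y_num_col_dims=1, the op reduces to a plain 2-D matrix product of data1 and data2:

    import numpy as np

    data1 = np.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
    data2 = np.array([[1, 2], [1, 2], [1, 2]], dtype='float32')
    expected = data1.dot(data2)  # [[6., 12.], [15., 30.]]
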
@@ -86,9 +86,9 @@ from .math import atan  #DEFINE_ALIAS
 # from .math import floor        #DEFINE_ALIAS
 # from .math import increment        #DEFINE_ALIAS
 # from .math import log        #DEFINE_ALIAS
-# from .math import mul        #DEFINE_ALIAS
+from .math import mul        #DEFINE_ALIAS
 # from .math import multiplex        #DEFINE_ALIAS
-# from .math import pow        #DEFINE_ALIAS
+from .math import pow        #DEFINE_ALIAS
 # from .math import reciprocal        #DEFINE_ALIAS
 # from .math import reduce_max        #DEFINE_ALIAS
 # from .math import reduce_min        #DEFINE_ALIAS
......
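
With the two DEFINE_ALIAS lines above uncommented, the ops are reachable from both namespaces; a quick sanity check (this assumes a build of this branch is installed):

    import paddle

    assert paddle.mul is paddle.tensor.mul
    assert paddle.pow is paddle.tensor.pow
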
@@ -16,6 +16,7 @@ math functions
 """
 from __future__ import print_function

 from paddle.common_ops_import import *
+from ..fluid.framework import core
 from ..fluid.layers.layer_function_generator import _generate_doc_string_
@@ -43,9 +44,9 @@ __all__ = [
 #            'floor',
 #            'increment',
 #            'log',
-#            'mul',
+            'mul',
 #            'multiplex',
-#            'pow',
+            'pow',
 #            'reciprocal',
 #            'reduce_max',
 #            'reduce_min',
@@ -143,6 +144,157 @@ Examples:
 """ % op_type
     return func
+
+
+@templatedoc()
+def pow(input, exponent, out=None, name=None):
+    """
+    This is the Pow activation operator.
+
+    :math:`out = input^{exponent}`
+
+    Args:
+        input(Variable): A ``Tensor`` or ``LoDTensor``. The data type is ``float32`` or ``float64``.
+        exponent(float32|Variable): A scalar with type ``float32``, or a ``Tensor`` with shape [1] and type ``float32``.
+        out (Variable, optional): The Variable that stores the result of the operation.
+            If out is None, a new Variable will be created to store the result.
+        name(str, optional): The default value is None. Normally there is no need for the user to set this property.
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Variable: A ``Tensor`` or ``LoDTensor``. The data type is the same as ``input``.
+
+    Examples:
+
+        .. code-block:: python
+
+            import paddle
+
+            x = paddle.fluid.data(name="x", shape=[32, 32], dtype="float32")
+
+            # example 1: the exponent argument is a float
+            res_1 = paddle.fluid.data(name="output_1", shape=[32, 32], dtype="float32")
+            y_1 = paddle.pow(x, 2.0, out=res_1)
+            # y_1 is x^{2.0}
+
+            # example 2: the exponent argument is a Variable
+            exponent_tensor = paddle.fluid.layers.fill_constant([1], "float32", 3.0)
+            res_2 = paddle.fluid.data(name="output_2", shape=[32, 32], dtype="float32")
+            y_2 = paddle.pow(x, exponent_tensor, out=res_2)
+            # y_2 is x^{3.0}
+    """
+    helper = LayerHelper('pow', **locals())
+    inputs = {'X': input}
+    attrs = {}
+    if isinstance(exponent, Variable):
+        exponent.stop_gradient = True
+        inputs['FactorTensor'] = exponent
+    else:
+        attrs['factor'] = exponent
+
+    if out is None:
+        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    else:
+        check_dtype(
+            out.dtype, out.name,
+            convert_dtype(input.dtype), 'pow',
+            '(The out data type in pow must be the same as the input data type.)')
+        if name:
+            warnings.warn(
+                "The output Variable name of the paddle.tensor.pow operation can only be given by parameter out or name. \
+                When parameter out and name are set at the same time, out has a higher priority than name. \
+                Finally, the output Variable name is the same as the out name %s"
+                % out.name,
+                category=UserWarning,
+                stacklevel=2)
+
+    helper.append_op(
+        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
+    return out
+
+
+def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, out=None, name=None):
+    """
+    Mul Operator.
+
+    This operator is used to perform matrix multiplication of the inputs $x$ and $y$.
+
+    The equation is:
+
+    ..  math::
+        Out = x * y
+
+    Both of the inputs $x$ and $y$ can carry the LoD (Level of Details) information, or not.
+    But the output only shares the LoD information with the input $x$.
+
+    Args:
+        x (Variable): The first input Tensor/LoDTensor of mul_op.
+        y (Variable): The second input Tensor/LoDTensor of mul_op.
+        x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs.
+            If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional
+            matrix first. The flattening rule is: the first `x_num_col_dims` dimensions are flattened to form the first
+            dimension of the final matrix (the height of the matrix), and the remaining `rank(x) - x_num_col_dims`
+            dimensions are flattened to form the second dimension of the final matrix (the width of the matrix).
+            As a result, the height of the flattened matrix is equal to the product of the sizes of $x$'s first
+            `x_num_col_dims` dimensions, and the width is equal to the product of the sizes of $x$'s last
+            `rank(x) - x_num_col_dims` dimensions. For example, suppose $x$ is a 5-dimensional tensor with the shape
+            [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Then the flattened matrix will have a shape
+            [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
+        y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the
+            input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first.
+            The attribute `y_num_col_dims` determines how $y$ is flattened. See the comments of `x_num_col_dims` for more
+            details. Default is 1.
+        out (Variable, optional): The Variable that stores the result of the operation. If out is None,
+            a new Variable will be created to store the result.
+        name (str, optional): Name of the output. Normally there is no need for the user to set this property.
+            For more information, please refer to :ref:`api_guide_Name`. Default is None. If both out and name are not
+            None, the output name will be the same as that of out.
+
+    Returns:
+        Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of the mul op.
+
+    Examples:
+        ..  code-block:: python
+
+            import paddle
+
+            dataX = paddle.fluid.data(name="dataX", shape=[2, 5], dtype="float32")
+            dataY = paddle.fluid.data(name="dataY", shape=[5, 3], dtype="float32")
+            res = paddle.fluid.data(name="output", shape=[2, 3], dtype="float32")
+            output = paddle.mul(dataX, dataY,
+                                x_num_col_dims=1,
+                                y_num_col_dims=1,
+                                out=res)
+    """
+    inputs = {"X": [x], "Y": [y]}
+    attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
+    if in_dygraph_mode():
+        outs = core.ops.mul(inputs, attrs)
+        return outs['Out'][0]
+
+    helper = LayerHelper("mul", **locals())
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
+    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
+
+    if out is None:
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_dtype(
+            out.dtype, out.name,
+            convert_dtype(x.dtype), 'mul',
+            '(The out data type in mul must be the same as the input data type.)')
+        if name:
+            warnings.warn(
+                "The output Variable name of the paddle.tensor.mul operation can only be given by parameter out or name. \
+                When parameter out and name are set at the same time, out has a higher priority than name. \
+                Finally, the output Variable name is the same as the out name %s"
+                % out.name,
+                category=UserWarning,
+                stacklevel=2)
+
+    helper.append_op(
+        type="mul", inputs={"X": x,
+                            "Y": y}, attrs=attrs, outputs={"Out": out})
+    return out
+
+
 __ops__noattr__ = [
     'atan',
......
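
The x_num_col_dims/y_num_col_dims flattening rule in the mul docstring is easiest to see numerically. A NumPy sketch using the [2, 3, 4, 5, 6] example from the docstring (the reshape/matmul equivalence is the point being illustrated; the shape of y is made up for the example):

    import numpy as np

    x = np.random.rand(2, 3, 4, 5, 6).astype("float32")  # rank-5 input
    y = np.random.rand(5, 6, 7).astype("float32")        # rank-3 input

    # x_num_col_dims=3: the first 3 dims form the height, the rest the width.
    x2d = x.reshape(2 * 3 * 4, 5 * 6)                    # [24, 30]
    # y_num_col_dims=2: the first 2 dims form the height, the rest the width.
    y2d = y.reshape(5 * 6, 7)                            # [30, 7]

    out2d = x2d.dot(y2d)                                 # [24, 7]
    # The op's output keeps x's leading dims and y's trailing dims,
    # i.e. it corresponds to out2d reshaped to [2, 3, 4, 7].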