diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index a94c72ebac3ce63d72ca544b2a8fb103c31aaffc..5e8699d2be9efd85e8e208efd1967650773ed6c9 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -236,10 +236,10 @@ paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=Non
 paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b97042c3ba55fb5fec6a06308523b73'))
 paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b992616c1afbd6b0c2a897ac23036381'))
 paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
-paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7fa4f12d3dad010f3862df271b31e4de'))
-paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '39ee2e90c1ede44e47f279fc466f3151'))
-paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '890017540bd2f982f80da81a98832609'))
-paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7994818219805a2ec34a37cd9baceeb7'))
+paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '77ab8a79746ce9b96625c6195c27dfbd'))
+paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '128d140ac78c610c35fc38663baf9654'))
+paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '061219cf5a710c090eb5b31d0a0d841d'))
+paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '57d99bd329b8ea842802a7ea52724163'))
 paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '3b3c2e528712552f6f44aef88796321d'))
 paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '817e8ce2b39de9b4a94b1b6d592144e0'))
 paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'b5e3964c8711058634cf5b57b4884258'))
diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc
index fd93aa441eda78613422fee4809d7b0d4467fc95..c9168fdf53f7bc3d76f8bd8eedc35b081d8397b3 100644
--- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc
@@ -20,6 +20,28 @@ limitations under the License. */
 
 namespace paddle {
 namespace operators {
+class ElementwiseAddOpMaker : public ElementwiseOpMaker {
+ protected:
+  std::string GetName() const override { return "Add"; }
+  std::string GetEquation() const override { return "Out = X + Y"; }
+
+  void AddInputX() override {
+    AddInput("X",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  void AddInputY() override {
+    AddInput("Y",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  std::string GetOpFuntionality() const override {
+    return "Add two tensors element-wise";
+  }
+};
+
 class ElementwiseAddDoubleGradDescMaker
     : public framework::SingleGradOpDescMaker {
  public:
@@ -45,10 +67,10 @@ class ElementwiseAddDoubleGradDescMaker
 }  // namespace paddle
 
 REGISTER_ELEMWISE_GRAD_MAKER(elementwise_add, Add);
-REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_add, "Add",
-                                           "Out = X + Y");
+REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_add, Add);
 
 namespace ops = paddle::operators;
+
 REGISTER_OPERATOR(elementwise_add_grad, ops::ElementwiseOpExplicitGrad,
                   ops::ElementwiseGradOpInplace,
                   ops::ElementwiseGradNoBufVarsInference,
diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc
index f025a8452059f9b6f97b9f73ad667d12ccf37a7e..2002e8f31cb6000612cbd30fb82a3da5762daa3c 100644
--- a/paddle/fluid/operators/elementwise/elementwise_div_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc
@@ -24,6 +24,22 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker {
  protected:
   std::string GetName() const override { return "Div"; }
   std::string GetEquation() const override { return "Out = X / Y"; }
+
+  void AddInputX() override {
+    AddInput("X",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  void AddInputY() override {
+    AddInput("Y",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  std::string GetOpFuntionality() const override {
+    return "Divide two tensors element-wise";
+  }
 };
 
 class ElementwiseDivGradOpDescMaker : public framework::SingleGradOpDescMaker {
diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc
index 69900e0637ce587b326123d326d322fe73c75617..0998b27ea11bec086cdfe580519b3026a5834074 100644
--- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc
@@ -20,6 +20,28 @@ limitations under the License. */
 
 namespace paddle {
 namespace operators {
+class ElementwiseMulOpMaker : public ElementwiseOpMaker {
+ protected:
+  std::string GetName() const override { return "Mul"; }
+  std::string GetEquation() const override { return "Out = X \\\\odot Y"; }
+
+  void AddInputX() override {
+    AddInput("X",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  void AddInputY() override {
+    AddInput("Y",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  std::string GetOpFuntionality() const override {
+    return "Multiply two tensors element-wise";
+  }
+};
+
 class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
  public:
   using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
@@ -38,12 +60,6 @@ class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
   }
 };
 
-class ElementwiseMulOpMaker : public ElementwiseOpMaker {
- protected:
-  virtual std::string GetName() const { return "Mul"; }
-  virtual std::string GetEquation() const { return "Out = X \\\\odot Y"; }
-};
-
 class ElementwiseMulDoubleGradDescMaker
     : public framework::SingleGradOpDescMaker {
  public:
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index b1578443de93a654fd01a2418b6da9561fb37d0a..74392ab364247dba854eef7ea608ee1d04eb49ae 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -413,15 +413,9 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(ElementwiseDoubleGradNoBufVarsInference,
                   ::paddle::operators::ElementwiseGradOpInplace,             \
                   ::paddle::operators::ElementwiseGradNoBufVarsInference)
 
-#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name, equation) \
-  class __ElemwiseOp##op_type##Maker__                                         \
-      : public ::paddle::operators::ElementwiseOpMaker {                       \
-   protected:                                                                  \
-    virtual std::string GetName() const { return op_name; }                    \
-    virtual std::string GetEquation() const { return equation; }               \
-  };                                                                           \
-  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,               \
-                    __ElemwiseOp##op_type##Maker__,                            \
-                    ::paddle::operators::ElementwiseOpInferVarType,            \
-                    op_type##GradMaker,                                        \
+#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)        \
+  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,            \
+                    ::paddle::operators::Elementwise##op_name##OpMaker,     \
+                    ::paddle::operators::ElementwiseOpInferVarType,         \
+                    op_type##GradMaker,                                     \
                     ::paddle::operators::ElementwiseOpInplace);
diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc
index b3003092c76fe97fadedaaeab317fbd4364beafb..1692a8c2f235cb0d28dd0f53986aa69b03f0e880 100644
--- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc
@@ -20,6 +20,28 @@ limitations under the License. */
 
 namespace paddle {
 namespace operators {
+class ElementwiseSubOpMaker : public ElementwiseOpMaker {
+ protected:
+  std::string GetName() const override { return "Sub"; }
+  std::string GetEquation() const override { return "Out = X - Y"; }
+
+  void AddInputX() override {
+    AddInput("X",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  void AddInputY() override {
+    AddInput("Y",
+             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
+             "should be int32, int64, float32, float64.");
+  }
+
+  std::string GetOpFuntionality() const override {
+    return "Subtract two tensors element-wise";
+  }
+};
+
 class ElementwiseSubDoubleGradDescMaker
     : public framework::SingleGradOpDescMaker {
  public:
@@ -46,8 +68,7 @@ class ElementwiseSubDoubleGradDescMaker
 
 namespace ops = paddle::operators;
 REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub);
-REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, "Sub",
-                                           "Out = X - Y");
+REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, Sub);
 
 REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpExplicitGrad,
                   ops::ElementwiseGradOpInplace,
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index fd29c9ed906fa789d5b0073228e144127c95d6f9..8f6c8a5d1279475fb47f367ef7ea5d414e8c6f25 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11702,18 +11702,310 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
 
 
 def elementwise_add(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]).astype('float32'),
+                "y": np.array([1, 5, 2]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_add(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) #[3., 8., 6.]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.ones((2, 3, 4, 5)).astype('float32'),
+                "y": np.zeros((3, 4)).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+        z = fluid.layers.elementwise_add(x, y, axis=1)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) # z.shape=[2,3,4,5]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
+                "y": np.random.randint(1, 5, size=[5]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
+        z = fluid.layers.elementwise_add(x, y, axis=3)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+        print(z_value) # z.shape=[2,3,4,5]
+
+    """
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
 
 
 def elementwise_div(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]).astype('float32'),
+                "y": np.array([1, 5, 2]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_div(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) #[2., 0.6, 2.]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.ones((2, 3, 4, 5)).astype('float32'),
+                "y": np.ones((3, 4)).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+        z = fluid.layers.elementwise_div(x, y, axis=1)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) # z.shape=[2,3,4,5]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
+                "y": np.random.randint(1, 5, size=[5]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
+        z = fluid.layers.elementwise_div(x, y, axis=3)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+        print(z_value) # z.shape=[2,3,4,5]
+
+    """
     return _elementwise_op(LayerHelper('elementwise_div', **locals()))
 
 
 def elementwise_sub(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]).astype('float32'),
+                "y": np.array([1, 5, 2]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_sub(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) #[1., -2., 2.]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.ones((2, 3, 4, 5)).astype('float32'),
+                "y": np.zeros((3, 4)).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+        z = fluid.layers.elementwise_sub(x, y, axis=1)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) # z.shape=[2,3,4,5]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
+                "y": np.random.randint(1, 5, size=[5]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
+        z = fluid.layers.elementwise_sub(x, y, axis=3)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+        print(z_value) # z.shape=[2,3,4,5]
+
+    """
     return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
 
 
 def elementwise_mul(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]).astype('float32'),
+                "y": np.array([1, 5, 2]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_mul(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) #[2., 15., 8.]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.ones((2, 3, 4, 5)).astype('float32'),
+                "y": np.zeros((3, 4)).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+        z = fluid.layers.elementwise_mul(x, y, axis=1)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+
+        print(z_value) # z.shape=[2,3,4,5]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
+                "y": np.random.randint(1, 5, size=[5]).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
+        z = fluid.layers.elementwise_mul(x, y, axis=3)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                            fetch_list=[z.name])
+        print(z_value) # z.shape=[2,3,4,5]
+
+    """
     return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
 
 
@@ -11863,6 +12155,10 @@ def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
 
 
 for func in [
+        elementwise_add,
+        elementwise_div,
+        elementwise_sub,
+        elementwise_mul,
         elementwise_max,
         elementwise_pow,
         elementwise_min,
@@ -11886,10 +12182,6 @@ for func in [
 for func in [
         elementwise_mod,
         elementwise_floordiv,
-        elementwise_add,
-        elementwise_div,
-        elementwise_sub,
-        elementwise_mul,
 ]:
     op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
     func.__doc__ = _generate_doc_string_(
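
A note on the broadcasting used in the new axis examples above: with axis=1, a Y of shape [3, 4] is matched against dimensions 1 and 2 of X's shape [2, 3, 4, 5], and with axis=3 a Y of shape [5] is matched against the last dimension, so the result keeps X's shape. A minimal NumPy sketch of the equivalent broadcast (illustrative only, not part of the patch):

    .. code-block:: python

        import numpy as np

        x = np.random.rand(2, 3, 4, 5).astype('float32')
        y = np.random.rand(3, 4).astype('float32')

        # fluid.layers.elementwise_add(x, y, axis=1) broadcasts y as if its
        # shape were [1, 3, 4, 1], then adds element-wise.
        z = x + y.reshape(1, 3, 4, 1)
        print(z.shape)  # (2, 3, 4, 5)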