Commit 5cef7a2f authored by danleifeng, committed by gongweibao

Polish English docs of elementwise_add/sub/mul/div (#20027)

Parent b9163350
......@@ -236,10 +236,10 @@ paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=Non
paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b97042c3ba55fb5fec6a06308523b73'))
paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b992616c1afbd6b0c2a897ac23036381'))
paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7fa4f12d3dad010f3862df271b31e4de'))
paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '39ee2e90c1ede44e47f279fc466f3151'))
paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '890017540bd2f982f80da81a98832609'))
paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7994818219805a2ec34a37cd9baceeb7'))
paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '77ab8a79746ce9b96625c6195c27dfbd'))
paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '128d140ac78c610c35fc38663baf9654'))
paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '061219cf5a710c090eb5b31d0a0d841d'))
paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '57d99bd329b8ea842802a7ea52724163'))
paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '3b3c2e528712552f6f44aef88796321d'))
paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '817e8ce2b39de9b4a94b1b6d592144e0'))
paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'b5e3964c8711058634cf5b57b4884258'))
......
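All of these elementwise_* entries share the same `axis` broadcasting rule: Y's shape must match a contiguous run of X's dimensions, and `axis` names the dimension of X where that run starts (the default `axis=-1` aligns Y with the trailing dimensions of X). Below is a minimal NumPy sketch of that rule, for illustration only; `broadcast_like_paddle` is a hypothetical helper, not part of the Paddle API.

.. code-block:: python

    import numpy as np

    def broadcast_like_paddle(x, y, axis=-1):
        """Reshape y so that x + y broadcasts the way Paddle's
        elementwise_* ops interpret the axis argument (sketch only)."""
        if axis == -1:
            # Default: align y with the trailing dimensions of x.
            axis = x.ndim - y.ndim
        # Pad y with singleton dimensions before and after its own dims.
        new_shape = (1,) * axis + y.shape + (1,) * (x.ndim - axis - y.ndim)
        return y.reshape(new_shape)

    x = np.ones((2, 3, 4, 5), dtype=np.float32)
    y = np.zeros((3, 4), dtype=np.float32)
    # axis=1: y's shape [3, 4] lines up with dimensions 1 and 2 of x.
    z = x + broadcast_like_paddle(x, y, axis=1)
    print(z.shape)  # (2, 3, 4, 5)

The docstring examples added later in this commit exercise the same rule with axis=1 and axis=3.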
......@@ -20,6 +20,28 @@ limitations under the License. */
namespace paddle {
namespace operators {
class ElementwiseAddOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Add"; }
  std::string GetEquation() const override { return "Out = X + Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Add two tensors element-wise";
  }
};
class ElementwiseAddDoubleGradDescMaker
: public framework::SingleGradOpDescMaker {
public:
......@@ -45,10 +67,10 @@ class ElementwiseAddDoubleGradDescMaker
} // namespace paddle
REGISTER_ELEMWISE_GRAD_MAKER(elementwise_add, Add);
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_add, "Add",
"Out = X + Y");
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_add, Add);
namespace ops = paddle::operators;
REGISTER_OPERATOR(elementwise_add_grad, ops::ElementwiseOpExplicitGrad,
ops::ElementwiseGradOpInplace,
ops::ElementwiseGradNoBufVarsInference,
......
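The new maker classes document that X and Y may be int32/int64 as well as float32/float64. Here is a minimal int64 sketch against the fluid 1.x Python API used elsewhere in this diff; it is illustrative only and not part of the commit.

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    # Integer inputs are allowed; here both operands are int64.
    x = fluid.layers.data(name="x", shape=[3], dtype='int64', append_batch_size=False)
    y = fluid.layers.data(name="y", shape=[3], dtype='int64', append_batch_size=False)
    z = fluid.layers.elementwise_add(x, y)

    exe = fluid.Executor(fluid.CPUPlace())
    out, = exe.run(feed={"x": np.array([2, 3, 4], dtype='int64'),
                         "y": np.array([1, 5, 2], dtype='int64')},
                   fetch_list=[z])
    print(out)  # [3 8 6]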
......@@ -24,6 +24,22 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Div"; }
  std::string GetEquation() const override { return "Out = X / Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Divide two tensors element-wise";
  }
};
class ElementwiseDivGradOpDescMaker : public framework::SingleGradOpDescMaker {
......
......@@ -20,6 +20,28 @@ limitations under the License. */
namespace paddle {
namespace operators {
class ElementwiseMulOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Mul"; }
  std::string GetEquation() const override { return "Out = X \\\\odot Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Multiply two tensors element-wise";
  }
};
class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
......@@ -38,12 +60,6 @@ class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
}
};
class ElementwiseMulOpMaker : public ElementwiseOpMaker {
 protected:
  virtual std::string GetName() const { return "Mul"; }
  virtual std::string GetEquation() const { return "Out = X \\\\odot Y"; }
};
class ElementwiseMulDoubleGradDescMaker
: public framework::SingleGradOpDescMaker {
public:
......
......@@ -413,15 +413,9 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(ElementwiseDoubleGradNoBufVarsInference,
::paddle::operators::ElementwiseGradOpInplace, \
::paddle::operators::ElementwiseGradNoBufVarsInference)
#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name, equation) \
  class __ElemwiseOp##op_type##Maker__                                          \
      : public ::paddle::operators::ElementwiseOpMaker {                        \
   protected:                                                                   \
    virtual std::string GetName() const { return op_name; }                     \
    virtual std::string GetEquation() const { return equation; }                \
  };                                                                            \
#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)            \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,                \
                    __ElemwiseOp##op_type##Maker__,                             \
                    ::paddle::operators::Elementwise##op_name##OpMaker,         \
                    ::paddle::operators::ElementwiseOpInferVarType,             \
                    op_type##GradMaker,                                         \
                    ::paddle::operators::ElementwiseOpInplace);
......@@ -20,6 +20,28 @@ limitations under the License. */
namespace paddle {
namespace operators {
class ElementwiseSubOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Sub"; }
  std::string GetEquation() const override { return "Out = X - Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Subtract two tensors element-wise";
  }
};
class ElementwiseSubDoubleGradDescMaker
: public framework::SingleGradOpDescMaker {
public:
......@@ -46,8 +68,7 @@ class ElementwiseSubDoubleGradDescMaker
namespace ops = paddle::operators;
REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub);
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, "Sub",
"Out = X - Y");
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, Sub);
REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpExplicitGrad,
ops::ElementwiseGradOpInplace,
......
......@@ -11702,18 +11702,310 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
def elementwise_add(x, y, axis=-1, act=None, name=None):
"""
Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_add(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # [3., 8., 6.]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3, 4], dtype='float32')
        # y's shape [3, 4] is matched against dimensions 1 and 2 of x.
        z = fluid.layers.elementwise_add(x, y, axis=1)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        # y's shape must be [5] so that it matches dimension 3 of x.
        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_add(x, y, axis=3)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]
"""
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
def elementwise_div(x, y, axis=-1, act=None, name=None):
"""
Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_div(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # [2., 0.6, 2.]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                # Use a non-zero divisor to avoid division by zero.
                "y": np.full((3, 4), 2).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3, 4], dtype='float32')
        # y's shape [3, 4] is matched against dimensions 1 and 2 of x.
        z = fluid.layers.elementwise_div(x, y, axis=1)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        # y's shape must be [5] so that it matches dimension 3 of x.
        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_div(x, y, axis=3)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]
"""
return _elementwise_op(LayerHelper('elementwise_div', **locals()))
def elementwise_sub(x, y, axis=-1, act=None, name=None):
"""
Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_sub(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # [1., -2., 2.]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3, 4], dtype='float32')
        # y's shape [3, 4] is matched against dimensions 1 and 2 of x.
        z = fluid.layers.elementwise_sub(x, y, axis=1)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        # y's shape must be [5] so that it matches dimension 3 of x.
        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_sub(x, y, axis=3)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]
"""
return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
def elementwise_mul(x, y, axis=-1, act=None, name=None):
"""
Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_mul(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # [2., 15., 8.]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        y = fluid.layers.data(name="y", shape=[3, 4], dtype='float32')
        # y's shape [3, 4] is matched against dimensions 1 and 2 of x.
        z = fluid.layers.elementwise_mul(x, y, axis=1)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }

        x = fluid.layers.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
        # y's shape must be [5] so that it matches dimension 3 of x.
        y = fluid.layers.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_mul(x, y, axis=3)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                          fetch_list=[z.name])
        print(z_value)  # z.shape=[2, 3, 4, 5]
"""
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
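All four layers also accept the `act` argument visible in the ArgSpec entries at the top of this diff; it fuses an activation onto the elementwise result. A short sketch, assuming the same fluid.layers API as the examples above (not part of this commit):

.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.layers.data(name="x", shape=[3], dtype='float32')
    y = fluid.layers.data(name="y", shape=[3], dtype='float32')
    # act fuses an activation onto the output: z = relu(x * y).
    z = fluid.layers.elementwise_mul(x, y, act='relu')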
......@@ -11863,6 +12155,10 @@ def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
for func in [
elementwise_add,
elementwise_div,
elementwise_sub,
elementwise_mul,
elementwise_max,
elementwise_pow,
elementwise_min,
......@@ -11886,10 +12182,6 @@ for func in [
for func in [
elementwise_mod,
elementwise_floordiv,
elementwise_add,
elementwise_div,
elementwise_sub,
elementwise_mul,
]:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
    func.__doc__ = _generate_doc_string_(
......
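These loops rebuild each function's `__doc__` from the operator proto, which now carries the polished English strings from the C++ makers above. A quick way to inspect the assembled result (a sketch, not part of the commit):

.. code-block:: python

    import paddle.fluid as fluid

    # __doc__ is regenerated from the operator proto by the loops above.
    print(fluid.layers.elementwise_add.__doc__)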