From a7512db2bc6a5bc6badaaf5b3a32abb42b9463aa Mon Sep 17 00:00:00 2001
From: gongweibao
Date: Thu, 26 Sep 2019 11:03:40 +0800
Subject: [PATCH] Polish elementwise max min pow document to add more examples. (#19946)

Polish elementwise max min pow document to add more examples
---
 paddle/fluid/API.spec                                   |  18 +--
 .../elementwise/elementwise_max_op.cc                   |  17 ++
 .../elementwise/elementwise_min_op.cc                   |  17 ++
 .../operators/elementwise/elementwise_op.h              |  62 ++++---
 .../elementwise/elementwise_pow_op.cc                   |   9 ++
 .../fluid/layers/layer_function_generator.py            |   9 +-
 python/paddle/fluid/layers/nn.py                        | 153 +++++++++++++++++-
 7 files changed, 250 insertions(+), 35 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 62cb9d39187..51f93f407a5 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -236,15 +236,15 @@ paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=Non
 paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b97042c3ba55fb5fec6a06308523b73'))
 paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b992616c1afbd6b0c2a897ac23036381'))
 paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
-paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '5c0fb7298aec32525f96d451ae4c2851'))
-paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '1da49b7cda887dd84087ef8c060fcf6a'))
-paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '992559c8327c61babd2ed25fc9047fbf'))
-paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '213db11a61dcb0f31159d343cc35e2f5'))
-paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '409167a1409ec31b0d3a2f8852a7943f'))
-paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '4e1322836eb69473d5606bfe346c5375'))
-paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'b9e7e9fa1ca28d8b6f07cc59eadb4a02'))
-paddle.fluid.layers.elementwise_mod (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '614984304f810f3ddae6b489ec01296b'))
-paddle.fluid.layers.elementwise_floordiv (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'a8c4b26d899246378e878f169582c7a4'))
+paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7fa4f12d3dad010f3862df271b31e4de'))
+paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '39ee2e90c1ede44e47f279fc466f3151'))
+paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '890017540bd2f982f80da81a98832609'))
+paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7994818219805a2ec34a37cd9baceeb7'))
+paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '3b3c2e528712552f6f44aef88796321d'))
+paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '817e8ce2b39de9b4a94b1b6d592144e0'))
+paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'b5e3964c8711058634cf5b57b4884258'))
+paddle.fluid.layers.elementwise_mod (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '4101ee1f9280f00dce54054ccc434890'))
+paddle.fluid.layers.elementwise_floordiv (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '67e6101c31314d4082621e8e443cfb68'))
 paddle.fluid.layers.uniform_random_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0)), ('document', 'cfa120e583cd4a5bfa120c8a26f98a28'))
 paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'ebbf399d4e03190ce5dc9488f05c92f4'))
 paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'c39b647b6cf08e058d96ee503d5284fe'))
diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc
index b7df9c6f845..40826c7fefd 100644
--- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc
@@ -24,6 +24,23 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
  protected:
   std::string GetName() const override { return "Max"; }
   std::string GetEquation() const override { return "Out = max(X, Y)"; }
+
+  void AddInputX() override {
+    AddInput(
+        "X",
+        "(Variable), The first tensor holding the elements to be compared.");
+  }
+
+  void AddInputY() override {
+    AddInput(
+        "Y",
+        "(Variable), The second tensor holding the elements to be compared.");
+  }
+
+  std::string GetOpFuntionality() const override {
+    return "Compares two tensors and returns a new tensor containing the "
+           "element-wise maxima.";
+  }
 };
 
 class ElementwiseMaxGradOpDescMaker : public framework::SingleGradOpDescMaker {
diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc
index f60c0ed8a0f..9f00fc445a4 100644
--- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc
@@ -24,6 +24,23 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
  protected:
   std::string GetName() const override { return "Min"; }
   std::string GetEquation() const override { return "Out = min(X, Y)"; }
+
+  void AddInputX() override {
+    AddInput(
+        "X",
+        "(Variable), The first tensor holding the elements to be compared.");
+  }
+
+  void AddInputY() override {
+    AddInput(
+        "Y",
+        "(Variable), The second tensor holding the elements to be compared.");
+  }
+
+  std::string GetOpFuntionality() const override {
+    return "Compares two tensors and returns a new tensor containing the "
+           "element-wise minima.";
+  }
 };
 
 class ElementwiseMinGradOpDescMaker : public framework::SingleGradOpDescMaker {
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index da678c5ee43..b1578443de9 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -96,12 +96,15 @@ class ElementwiseOpInferVarType
 class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() final {
-    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
-    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
-    AddOutput("Out", "The output of elementwise op.");
+    AddInputX();
+    AddInputY();
+    AddOpOutput();
+
     AddAttr<int>("axis",
-                 "(int, default -1). The start dimension index "
-                 "for broadcasting Y onto X.")
+                 "(int, default -1). If X.dimension != Y.dimension, "
+                 "Y.dimension must be a subsequence of X.dimension, and axis "
+                 "is the start dimension index "
+                 "for broadcasting Y onto X. ")
         .SetDefault(-1)
         .EqualGreaterThan(-1);
     AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
        .SetDefault(false);
@@ -120,14 +123,41 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
                   "Defaults to \"\". Specify the data format of the output data, "
                   "the input will be transformed automatically. ")
         .SetDefault("");
-    AddComment(string::Sprintf(R"DOC(
-Elementwise %s Operator
+
+    AddOpComment();
+  }
+
+ protected:
+  virtual void AddInputX() {
+    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
+  }
+  virtual void AddInputY() {
+    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
+  }
+  virtual void AddOpOutput() {
+    AddOutput("Out",
+              "N-dimension tensor. A location into which the result is stored. "
+              "Its dimension "
+              "equals that of X.");
+  }
+  virtual void AddOpComment() { AddComment(GetCommentExamples()); }
+
+  virtual std::string GetOpFuntionality() const { return ""; }
+
+  virtual std::string GetName() const = 0;
+  virtual std::string GetEquation() const = 0;
+
+  std::string GetCommentExamples() const {
+    return string::Sprintf(R"DOC(
+Elementwise %s Operator.
+
+%s
 
 The equation is:
 
 $$%s$$
 
-- $X$: a tensor of any dimension. 
+- $X$: a tensor of any dimension.
 - $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.
 
 There are two cases for this operator:
@@ -137,10 +167,10 @@
 
 For case 2:
 
-1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index 
-   for broadcasting $Y$ onto $X$. 
+1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
+   for broadcasting $Y$ onto $X$.
 2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
-3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of 
+3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of
    subsequence, such as shape(Y) = (2, 1) => (2).
 
 For example:
@@ -154,17 +184,9 @@ For example:
     shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
     shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
 
-The inputs $X$ and $Y$ can carry the different LoD information.
-But the output only shares the LoD information with the input $X$.
-
-)DOC",
-                           GetName(), GetEquation()));
+)DOC",
+                           GetName(), GetOpFuntionality(), GetEquation());
   }
-
- protected:
-  virtual std::string GetName() const = 0;
-
-  virtual std::string GetEquation() const = 0;
 };
 
 class ElementwiseOpGrad : public framework::OperatorWithKernel {
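The broadcasting rule that the rewritten operator comment describes can be checked outside of Paddle. Below is a minimal NumPy sketch (an illustration only, not part of the patch; the array names and the reshape helper are assumptions made for this example) of how a `Y` of shape (3, 4) is aligned with an `X` of shape (2, 3, 4, 5) when `axis=1`:

.. code-block:: python

    import numpy as np

    x = np.random.rand(2, 3, 4, 5).astype('float32')
    y = np.random.rand(3, 4).astype('float32')

    # axis=1: align y with dimensions 1..2 of x, then pad the remaining
    # trailing dimensions with size-1 axes so that plain NumPy broadcasting
    # reproduces the rule described in the DOC string above.
    axis = 1
    y_aligned = y.reshape((1,) * axis + y.shape + (1,) * (x.ndim - axis - y.ndim))
    print(y_aligned.shape)          # (1, 3, 4, 1)

    out = np.maximum(x, y_aligned)  # result has the same shape as x
    print(out.shape)                # (2, 3, 4, 5)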
diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc
index 59ec9a2d4a5..d3e225a7c07 100644
--- a/paddle/fluid/operators/elementwise/elementwise_pow_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc
@@ -38,6 +38,15 @@ class ElementwisePowOpMaker : public ElementwiseOpMaker {
  protected:
   std::string GetName() const override { return "Pow"; }
   std::string GetEquation() const override { return "Out = X ^ Y"; }
+
+  void AddInputX() override { AddInput("X", "(Variable), The Base."); }
+
+  void AddInputY() override { AddInput("Y", "(Variable), The exponents."); }
+
+  std::string GetOpFuntionality() const override {
+    return "First tensor elements raised to powers from the second tensor, "
+           "element-wise.";
+  }
 };
 }  // namespace operators
 }  // namespace paddle
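As a quick sanity check of the behaviour described above (and of the values used in the Python example added further down), NumPy's element-wise power gives the same results. This snippet is illustrative only and is not part of the patch:

.. code-block:: python

    import numpy as np

    # Element-wise exponentiation: each element of x is raised to the power
    # of the corresponding element of y, matching Out = X ^ Y above.
    x = np.array([2., 3., 4.])
    y = np.array([1., 5., 2.])
    print(np.power(x, y))  # [  2. 243.  16.]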
diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py
index 2c982dc26d6..c7b58692d6f 100755
--- a/python/paddle/fluid/layers/layer_function_generator.py
+++ b/python/paddle/fluid/layers/layer_function_generator.py
@@ -61,7 +61,9 @@ def escape_math(text):
                                  _two_dollar_pattern_.sub(r"!!\1!!", text)))
 
 
-def _generate_doc_string_(op_proto, additional_args_lines=None):
+def _generate_doc_string_(op_proto,
+                          additional_args_lines=None,
+                          skip_attrs_set=None):
     """
     Generate docstring by OpProto
 
@@ -93,6 +95,11 @@ def _generate_doc_string_(op_proto, additional_args_lines=None):
     skip_attrs.add("use_mkldnn")
     skip_attrs.add("is_test")
     skip_attrs.add("use_cudnn")
+
+    if skip_attrs_set:
+        for t in skip_attrs_set:
+            skip_attrs.add(t)
+
     for each_attr in op_proto.attrs:
         if each_attr.name in skip_attrs:
             continue
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index f4f40cf4a82..fd29c9ed906 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11718,14 +11718,139 @@ def elementwise_mul(x, y, axis=-1, act=None, name=None):
 
 
 def elementwise_max(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]),
+                "y": np.array([1, 5, 2])
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_max(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                          fetch_list=[z.name])
+
+        print(z_value) #[2, 5, 4]
+
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.ones((2, 3, 4, 5)).astype('float32'),
+                "y": np.zeros((3, 4)).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+        z = fluid.layers.elementwise_max(x, y, axis=1)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                          fetch_list=[z.name])
+
+        print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]
+
+    """
     return _elementwise_op(LayerHelper('elementwise_max', **locals()))
 
 
 def elementwise_min(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]),
+                "y": np.array([1, 5, 2])
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_min(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                          fetch_list=[z.name])
+
+        print(z_value) #[1, 3, 2]
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.ones((2, 3, 4, 5)).astype('float32'),
+                "y": np.zeros((3, 4)).astype('float32')
+            }
+
+        x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3,4], dtype='float32')
+        z = fluid.layers.elementwise_min(x, y, axis=1)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        z_value = exe.run(feed=gen_data(),
+                          fetch_list=[z.name])
+
+        print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
+    """
+
     return _elementwise_op(LayerHelper('elementwise_min', **locals()))
 
 
 def elementwise_pow(x, y, axis=-1, act=None, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        def gen_data():
+            return {
+                "x": np.array([2, 3, 4]),
+                "y": np.array([1, 5, 2])
+            }
+
+        x = fluid.layers.data(name="x", shape=[3], dtype='float32')
+        y = fluid.layers.data(name="y", shape=[3], dtype='float32')
+        z = fluid.layers.elementwise_pow(x, y)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        z_value = exe.run(feed=gen_data(),
+                          fetch_list=[z.name])
+
+        print(z_value) #[2, 243, 16]
+    """
+
     return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
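The hunk below then stitches these hand-written Examples sections onto the reference text that ``_generate_doc_string_`` produces from each OpProto. Assuming a Paddle build that already contains this patch, the combined result can be inspected directly; the snippet is illustrative only:

.. code-block:: python

    import paddle.fluid as fluid

    # The generated reference section (operator comment plus the arguments
    # from the OpProto, minus the attributes listed in skip_attrs_set) comes
    # first, followed by the hand-written Examples block defined above.
    print(fluid.layers.elementwise_max.__doc__)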
@@ -11738,15 +11863,33 @@ def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
 
 
 for func in [
-        elementwise_add,
-        elementwise_div,
-        elementwise_sub,
-        elementwise_mul,
         elementwise_max,
-        elementwise_min,
         elementwise_pow,
+        elementwise_min,
+]:
+    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
+    func.__doc__ = _generate_doc_string_(
+        op_proto,
+        additional_args_lines=[
+            "axis (int32, optional): If X.dimension != Y.dimension, \
+            Y.dimension must be a subsequence of X.dimension. \
+            And axis is the start dimension index for broadcasting Y onto X. ",
+            "act (string, optional): Activation applied to the output. \
+            Default is None. Details: :ref:`api_guide_activations_en` ",
+            "name (string, optional): Name of the output. \
+            Default is None. It's used to print debug info for developers. Details: \
+            :ref:`api_guide_Name` "
+        ],
+        skip_attrs_set={"x_data_format", "y_data_format", "axis"
+                        }) + """\n""" + str(func.__doc__)
+
+for func in [
         elementwise_mod,
         elementwise_floordiv,
+        elementwise_add,
+        elementwise_div,
+        elementwise_sub,
+        elementwise_mul,
 ]:
     op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
     func.__doc__ = _generate_doc_string_(
-- 
GitLab