Unverified commit 6df4a667, authored by S scotty, committed via GitHub

add autogen code support for logical_and, logical_not, logical_or and logical_xor (#52451)

Parent commit: d394c9ed
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
namespace paddle {
namespace operators {
// Proto maker for binary logical ops (logical_and / logical_or / logical_xor).
// OpComment supplies the op's name (`type`) and its doc equation (`equation`);
// Make() registers the two inputs, the boolean output, and the op doc.
template <typename OpComment>
class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    OpComment comment;
    AddInput("X",
             string::Sprintf("Left hand operand of %s operator. Must be "
                             "a Variable of type being one of bool, int8, "
                             "int16, int32, int64, float32, float64.",
                             comment.type));
    AddInput("Y",
             string::Sprintf("Right hand operand of %s operator. Must be "
                             "a Variable of type being one of bool, int8, "
                             "int16, int32, int64, float32, float64.",
                             comment.type));
    // The description has no format specifiers, so the previous
    // string::Sprintf wrapper was a no-op; pass the literal directly.
    AddOutput("Out", "n-dim bool Variable");
    AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim phi::DenseTensor or Tensor.
Each element of Out is calculated by %s
)DOC",
                               comment.type,
                               comment.equation));
  }
};
// Proto maker for the unary logical op (logical_not).
// OpComment supplies the op's name (`type`) and its doc equation (`equation`);
// Make() registers the single input, the boolean output, and the op doc.
template <typename OpComment>
class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    OpComment comment;
    AddInput("X",
             string::Sprintf(
                 "Operand of %s operator. Must be "
                 "a phi::DenseTensor or Tensor of type being one of bool, "
                 "int8, int16, int32, int64, float32, float64.",
                 comment.type));
    // The description has no format specifiers, so the previous
    // string::Sprintf wrapper was a no-op; pass the literal directly.
    AddOutput("Out", "n-dim bool phi::DenseTensor or Tensor.");
    AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X, and returns the Out. X and Out are N-dim phi::DenseTensor or Tensor.
Each element of Out is calculated by %s
)DOC",
                               comment.type,
                               comment.equation));
  }
};
// Common base for all logical operators. Its only customization is pinning
// the kernel's backend to the device that actually holds input "X", instead
// of the backend chosen by the default kernel-key deduction.
class LogicalOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto kernel_key = OperatorWithKernel::GetExpectedKernelType(ctx);
    // The kernel must run wherever the input tensor's data lives.
    const auto &x_place = ctx.Input<phi::DenseTensor>("X")->place();
    kernel_key.set_backend(phi::TransToPhiBackend(x_place));
    return kernel_key;
  }
};
template <typename OpComment>
class UnaryLogicalOp : public LogicalOp {
public:
using LogicalOp::LogicalOp;
protected:
void InferShape(framework::InferShapeContext *context) const override {
OpComment comment;
OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out");
}
};
// Binary logical operator (and/or/xor). Out's shape equals X's shape when the
// two inputs match exactly; otherwise it is the broadcast of the two shapes.
template <typename OpComment>
class BinaryLogicalOp : public LogicalOp {
 public:
  using LogicalOp::LogicalOp;

 protected:
  void InferShape(framework::InferShapeContext *context) const override {
    OpComment op_comment;
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", op_comment.type);
    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", op_comment.type);
    const auto x_dims = context->GetInputDim("X");
    const auto y_dims = context->GetInputDim("Y");
    if (x_dims == y_dims) {
      // Fast path: identical shapes, no broadcasting needed.
      context->SetOutputDim("Out", x_dims);
    } else {
      const int max_rank = std::max(x_dims.size(), y_dims.size());
      // Offset aligning the shorter shape's trailing dims with the longer's.
      const int axis = std::abs(x_dims.size() - y_dims.size());
      std::vector<int> x_bcast_dims(max_rank);
      std::vector<int> y_bcast_dims(max_rank);
      std::vector<int> out_bcast_dims(max_rank);
      GetBroadcastDimsArrays(x_dims,
                             y_dims,
                             x_bcast_dims.data(),
                             y_bcast_dims.data(),
                             out_bcast_dims.data(),
                             max_rank,
                             axis);
      context->SetOutputDim("Out", phi::make_ddim(out_bcast_dims));
    }
    context->ShareLoD("X", "Out");
  }
};
} // namespace operators
} // namespace paddle
// Defines a per-op comment struct (name + doc equation) and registers a
// two-input logical operator with no gradient. Comments must stay outside the
// macro body: a `//` before a `\` continuation would swallow the next line.
#define REGISTER_BINARY_LOGICAL_OP(op_type, _equation) \
 struct _##op_type##Comment { \
 static char type[]; \
 static char equation[]; \
 }; \
 char _##op_type##Comment::type[]{#op_type}; \
 char _##op_type##Comment::equation[]{_equation}; \
 REGISTER_OPERATOR( \
 op_type, \
 ::paddle::operators::BinaryLogicalOp<_##op_type##Comment>, \
 ::paddle::operators::BinaryLogicalOpProtoMaker<_##op_type##Comment>, \
 ::paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>, \
 ::paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
// Same as the binary variant but for a single-input logical operator
// (logical_not). Comments must stay outside the macro body: a `//` before a
// `\` continuation would swallow the next line.
#define REGISTER_UNARY_LOGICAL_OP(op_type, _equation) \
 struct _##op_type##Comment { \
 static char type[]; \
 static char equation[]; \
 }; \
 char _##op_type##Comment::type[]{#op_type}; \
 char _##op_type##Comment::equation[]{_equation}; \
 REGISTER_OPERATOR( \
 op_type, \
 ::paddle::operators::UnaryLogicalOp<_##op_type##Comment>, \
 ::paddle::operators::UnaryLogicalOpProtoMaker<_##op_type##Comment>, \
 ::paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>, \
 ::paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
// Register the four logical ops. The second argument is the LaTeX equation
// shown in the generated op documentation ($$...$$ delimiters).
REGISTER_BINARY_LOGICAL_OP(logical_and, "$$Out = X \\&\\& Y$$");
REGISTER_BINARY_LOGICAL_OP(logical_or, "$$Out = X || Y$$");
REGISTER_UNARY_LOGICAL_OP(logical_not, "$$Out = !X$$");
REGISTER_BINARY_LOGICAL_OP(logical_xor,
                           "$$Out = (X || Y) \\&\\& !(X \\&\\& Y)$$");
...@@ -846,38 +846,6 @@ ...@@ -846,38 +846,6 @@
func : logcumsumexp func : logcumsumexp
backward : logcumsumexp_grad backward : logcumsumexp_grad
- op : logical_and
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : logical_and
- op : logical_not
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : logical_not
- op : logical_or
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : logical_or
- op : logical_xor
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : logical_xor
- op : logspace - op : logspace
args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={}) args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
output : Tensor(out) output : Tensor(out)
......
...@@ -1210,6 +1210,30 @@ ...@@ -1210,6 +1210,30 @@
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- op : logical_and
inputs :
{x : X, y : Y}
outputs :
out : Out
- op : logical_not
inputs :
x : X
outputs :
out : Out
- op : logical_or
inputs :
{x : X, y : Y}
outputs :
out : Out
- op : logical_xor
inputs :
{x : X, y : Y}
outputs :
out : Out
- op : logit - op : logit
inputs : inputs :
x : X x : X
......
...@@ -963,6 +963,42 @@ ...@@ -963,6 +963,42 @@
data_type : x data_type : x
backward : log_softmax_grad backward : log_softmax_grad
- op : logical_and
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : logical_and
data_type : x
- op : logical_not
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : logical_not
data_type : x
- op : logical_or
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : logical_or
data_type : x
- op : logical_xor
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : logical_xor
data_type : x
- op : logit - op : logit
args : (Tensor x, float eps = 1e-6f) args : (Tensor x, float eps = 1e-6f)
output : Tensor output : Tensor
......
...@@ -228,7 +228,6 @@ def logical_xor(x, y, out=None, name=None): ...@@ -228,7 +228,6 @@ def logical_xor(x, y, out=None, name=None):
) )
@templatedoc()
def logical_not(x, out=None, name=None): def logical_not(x, out=None, name=None):
""" """
...@@ -250,7 +249,7 @@ def logical_not(x, out=None, name=None): ...@@ -250,7 +249,7 @@ def logical_not(x, out=None, name=None):
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`. name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
Tensor: ${out_comment} N-D Tensor. A location into which the result is stored. It's dimension equals with ``x``.
Examples: Examples:
.. code-block:: python .. code-block:: python
......
Markdown is supported.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.