/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/controlflow/compare_op.h"
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/pten/common/place.h"

namespace paddle {
namespace operators {

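// Proto maker shared by all compare operators: declares inputs X and Y, the
// broadcasting attribute "axis", the "force_cpu" attribute, the boolean output
// "Out", and an equation-specific doc string. The concrete op name and
// equation are supplied through the OpComment template argument.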
template <typename OpComment>
class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    OpComment comment;
    AddInput("X", string::Sprintf("the left hand operand of %s operator",
                                  comment.type));
    AddInput("Y", string::Sprintf("the right hand operand of %s operator",
                                  comment.type));
    AddAttr<int>(
        "axis",
        "The start dimension index for broadcasting Y onto X. [default -1]")
        .SetDefault(-1)
        .EqualGreaterThan(-1);
    AddAttr<bool>("force_cpu",
                  "Force fill output variable to cpu "
                  "memory. Otherwise, fill output variable to the running "
                  "device [default true].")
        .SetDefault(false);
    AddOutput("Out", string::Sprintf("n-dim bool tensor. Each element is %s",
                                     comment.equation));
    AddComment(string::Sprintf(R"DOC(
It operates element-wise on X and Y, and returns Out. Each of them is an
N-dim tensor. X and Y could be of any type. Each element of the Out tensor is
calculated by $%s$
)DOC",
                               comment.equation));
  }
};

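// Common operator definition for the compare ops: infers the (possibly
// broadcast) output shape and decides which device the kernel runs on.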
template <typename OpComment>
class CompareOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* context) const override {
    OpComment comment;
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
    auto dim_x = context->GetInputDim("X");
    auto dim_y = context->GetInputDim("Y");

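    // When the shapes match, the output simply shares X's shape and LoD;
    // otherwise derive the broadcast output shape from both inputs.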
    if (context->GetInputDim("X") == context->GetInputDim("Y")) {
      context->ShareDim("X", /*->*/ "Out");
      context->ShareLoD("X", /*->*/ "Out");
    } else {
      int max_dim = std::max(dim_x.size(), dim_y.size());
      int axis = std::abs(dim_x.size() - dim_y.size());
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
      GetBroadcastDimsArrays(dim_x, dim_y, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
      context->SetOutputDim("Out", framework::make_ddim(out_dims_array));
      // TODO: check whether sharing X's LoD is still valid when broadcasting.
      context->ShareLoD("X", /*->*/ "Out");
    }
  }

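  // Pick the kernel place: force CPU when requested, otherwise follow X's
  // place, falling back to the execution place for pinned-memory inputs.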
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx);
    // CompareOp kernel's device type is decided by input tensor place
    bool force_cpu = ctx.Attr<bool>("force_cpu");
    if (force_cpu) {
      kt.place_ = platform::CPUPlace();
    } else {
      if (ctx.Input<framework::LoDTensor>("X")->place().GetType() !=
          pten::AllocationType::GPUPINNED) {
        kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
      } else {
        kt.place_ = ctx.GetPlace();
      }
    }
    return kt;
  }
};

}  // namespace operators
}  // namespace paddle

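// Registers an op-version checkpoint for the force_cpu attribute (default
// false) so that programs saved with older op definitions remain compatible.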
#define REGISTER_COMPARE_OP_VERSION(op_type)                               \
  REGISTER_OP_VERSION(op_type)                                             \
      .AddCheckpoint(                                                      \
          R"ROC(Upgrade compare ops, add a new attribute [force_cpu])ROC", \
          paddle::framework::compatible::OpVersionDesc().ModifyAttr(       \
              "force_cpu",                                                 \
              "In order to force fill output variable to gpu memory.",     \
              false));

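// Defines the OpComment type (op name and equation) for one compare op and
// registers the operator together with its proto maker; compare ops have no
// gradient, so empty grad-op makers are used for static and imperative modes.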
#define REGISTER_COMPARE_OP(op_type, _equation)                           \
  struct _##op_type##Comment {                                            \
    static char type[];                                                   \
    static char equation[];                                               \
  };                                                                      \
  char _##op_type##Comment::type[]{#op_type};                             \
  char _##op_type##Comment::equation[]{_equation};                        \
  REGISTER_OPERATOR(                                                      \
      op_type, ::paddle::operators::CompareOp<_##op_type##Comment>,       \
      ::paddle::operators::CompareOpProtoMaker<_##op_type##Comment>,      \
      ::paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,   \
      ::paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>); \
  REGISTER_COMPARE_OP_VERSION(op_type);

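// Instantiate the six comparison operators and their CPU kernels. The second
// functor passed to REGISTER_COMPARE_KERNEL is the reversed comparison, used
// when broadcasting requires the operands to be swapped.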
REGISTER_COMPARE_OP(less_than, "Out = X < Y");
REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor,
                        paddle::operators::GreaterThanFunctor);
REGISTER_COMPARE_OP(less_equal, "Out = X <= Y");
REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor,
                        paddle::operators::GreaterEqualFunctor);
REGISTER_COMPARE_OP(greater_than, "Out = X > Y");
REGISTER_COMPARE_KERNEL(greater_than, CPU,
                        paddle::operators::GreaterThanFunctor,
                        paddle::operators::LessThanFunctor);
REGISTER_COMPARE_OP(greater_equal, "Out = X >= Y");
REGISTER_COMPARE_KERNEL(greater_equal, CPU,
                        paddle::operators::GreaterEqualFunctor,
                        paddle::operators::LessEqualFunctor);
REGISTER_COMPARE_OP(equal, "Out = X == Y");
REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor,
                        paddle::operators::EqualFunctor);
REGISTER_COMPARE_OP(not_equal, "Out = X != Y");
REGISTER_COMPARE_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor,
                        paddle::operators::NotEqualFunctor);