compare_op.cc 6.6 KB
Newer Older
1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Y
Yu Yang 已提交
2

L
Luo Tao 已提交
3 4 5
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
Y
Yu Yang 已提交
6

L
Luo Tao 已提交
7
    http://www.apache.org/licenses/LICENSE-2.0
Y
Yu Yang 已提交
8

L
Luo Tao 已提交
9 10 11 12 13
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
Y
Yu Yang 已提交
14

W
Wu Yi 已提交
15
#include "paddle/fluid/operators/controlflow/compare_op.h"
16
#include <algorithm>
S
Siddharth Goyal 已提交
17
#include <string>
18
#include <vector>
Y
Yi Wang 已提交
19
#include "paddle/fluid/framework/op_registry.h"
Z
Zhong Hui 已提交
20
#include "paddle/fluid/framework/op_version_registry.h"
21
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
22

Y
Yu Yang 已提交
23 24
namespace paddle {
namespace operators {
Y
Yiqun Liu 已提交
25

Y
Yu Yang 已提交
26 27 28
template <typename OpComment>
class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
Y
Yu Yang 已提交
29
  void Make() override {
Y
Yu Yang 已提交
30
    OpComment comment;
Y
yuyang18 已提交
31 32 33 34
    AddInput("X", string::Sprintf("the left hand operand of %s operator",
                                  comment.type));
    AddInput("Y", string::Sprintf("the right hand operand of %s operator",
                                  comment.type));
35 36 37 38 39
    AddAttr<int>(
        "axis",
        "The start dimension index for broadcasting Y onto X. [default -1]")
        .SetDefault(-1)
        .EqualGreaterThan(-1);
J
JiayiFeng 已提交
40
    AddAttr<bool>("force_cpu",
Y
yuyang18 已提交
41
                  "Force fill output variable to cpu "
J
JiayiFeng 已提交
42
                  "memory. Otherwise, fill output variable to the running "
Y
yuyang18 已提交
43
                  "device [default true].")
44
        .SetDefault(false);
Y
yuyang18 已提交
45 46
    AddOutput("Out", string::Sprintf("n-dim bool tensor. Each element is %s",
                                     comment.equation));
Y
yuyang18 已提交
47
    AddComment(string::Sprintf(R"DOC(
Y
Yu Yang 已提交
48 49
It operates element-wise on X and Y, and returns the Out. Each of them is a
N-dim tensor. X and Y could be any type.  The each element of the Out tensor is
Y
yuyang18 已提交
50
calculated by $%s$
Y
Yu Yang 已提交
51
)DOC",
Y
yuyang18 已提交
52
                               comment.equation));
Y
Yu Yang 已提交
53 54 55 56
  }
};

template <typename OpComment>
Z
Zeng Jinle 已提交
57
class CompareOp : public framework::OperatorWithKernel {
Y
Yu Yang 已提交
58
 public:
Z
Zeng Jinle 已提交
59 60 61 62
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* context) const override {
Y
Yu Yang 已提交
63
    OpComment comment;
64
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type);
65
    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
Y
Yu Yang 已提交
66 67
    auto dim_x = context->GetInputDim("X");
    auto dim_y = context->GetInputDim("Y");
68

69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84
    if (context->GetInputDim("X") == context->GetInputDim("Y")) {
      context->ShareDim("X", /*->*/ "Out");
      context->ShareLoD("X", /*->*/ "Out");
    } else {
      int max_dim = std::max(dim_x.size(), dim_y.size());
      int axis = std::abs(dim_x.size() - dim_y.size());
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
      GetBroadcastDimsArrays(dim_x, dim_y, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
      context->SetOutputDim("Out", framework::make_ddim(out_dims_array));
      // to do
      context->ShareLoD("X", /*->*/ "Out");
    }
Y
Yu Yang 已提交
85 86
  }

87
  framework::OpKernelType GetExpectedKernelType(
Y
Yiqun Liu 已提交
88
      const framework::ExecutionContext& ctx) const override {
89
    framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx);
90
    // CompareOp kernel's device type is decided by input tensor place
J
JiayiFeng 已提交
91
    bool force_cpu = ctx.Attr<bool>("force_cpu");
92 93 94 95 96 97 98 99 100 101
    if (force_cpu) {
      kt.place_ = platform::CPUPlace();
    } else {
      if (ctx.Input<framework::LoDTensor>("X")->place().type() !=
          typeid(platform::CUDAPinnedPlace)) {
        kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
      } else {
        kt.place_ = ctx.GetPlace();
      }
    }
102 103 104 105
    return kt;
  }
};

Y
Yu Yang 已提交
106 107 108
}  // namespace operators
}  // namespace paddle

// Records an op-version checkpoint for `op_type`: the `force_cpu`
// attribute's meaning/default was modified (ModifyAttr), so programs
// saved before the change remain loadable.
#define REGISTER_COMPARE_OP_VERSION(op_type)                               \
  REGISTER_OP_VERSION(op_type)                                             \
      .AddCheckpoint(                                                      \
          R"ROC(Upgrade compare ops, add a new attribute [force_cpu])ROC", \
          paddle::framework::compatible::OpVersionDesc().ModifyAttr(       \
              "force_cpu",                                                 \
              "In order to force fill output variable to gpu memory.",     \
              false));

// Declares the per-op comment struct (name + equation strings used by
// CompareOpProtoMaker), registers the operator with no gradient
// (EmptyGradOpMaker), and attaches its version checkpoint.
#define REGISTER_COMPARE_OP(op_type, _equation)                           \
  struct _##op_type##Comment {                                            \
    static char type[];                                                   \
    static char equation[];                                               \
  };                                                                      \
  char _##op_type##Comment::type[]{#op_type};                             \
  char _##op_type##Comment::equation[]{_equation};                        \
  REGISTER_OPERATOR(                                                      \
      op_type, ::paddle::operators::CompareOp<_##op_type##Comment>,       \
      ::paddle::operators::CompareOpProtoMaker<_##op_type##Comment>,      \
      ::paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,   \
      ::paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>); \
  REGISTER_COMPARE_OP_VERSION(op_type);

// CPU registrations for the six compare operators.
// NOTE(review): each REGISTER_COMPARE_KERNEL takes a second functor
// (e.g. less_than pairs GreaterEqualFunctor); presumably it is the
// inverse/swapped-operand variant used by the kernel in compare_op.h —
// confirm the exact pairing convention against that header.
REGISTER_COMPARE_OP(less_than, "Out = X < Y");
REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor,
                        paddle::operators::GreaterEqualFunctor);
REGISTER_COMPARE_OP(less_equal, "Out = X <= Y");
REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor,
                        paddle::operators::GreaterThanFunctor);
REGISTER_COMPARE_OP(greater_than, "Out = X > Y");
REGISTER_COMPARE_KERNEL(greater_than, CPU,
                        paddle::operators::GreaterThanFunctor,
                        paddle::operators::LessEqualFunctor);
REGISTER_COMPARE_OP(greater_equal, "Out = X >= Y");
REGISTER_COMPARE_KERNEL(greater_equal, CPU,
                        paddle::operators::GreaterEqualFunctor,
                        paddle::operators::LessThanFunctor);
REGISTER_COMPARE_OP(equal, "Out = X == Y");
REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor,
                        paddle::operators::EqualFunctor);
REGISTER_COMPARE_OP(not_equal, "Out = X != Y");
REGISTER_COMPARE_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor,
                        paddle::operators::NotEqualFunctor);