/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"

#include "paddle/pten/core/lod_utils.h"

namespace pten {
class DenseTensor;
}  // namespace pten

namespace paddle {
namespace framework {
class InferShapeContext;
class OpDesc;
class Scope;
}  // namespace framework
namespace imperative {
class OpBase;
}  // namespace imperative
}  // namespace paddle

namespace paddle {
namespace operators {

using LoD = framework::LoD;

class MergeLoDTensorOp : public framework::OperatorBase {
 public:
  MergeLoDTensorOp(const std::string &type,
                   const framework::VariableNameMap &inputs,
                   const framework::VariableNameMap &outputs,
                   const framework::AttributeMap &attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

 protected:
  void RunBase(const framework::Scope &scope,
               const platform::Place &dev_place) const {
    // get device context from pool
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(dev_place);

    auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>();
    auto &mask = scope.FindVar(Input("Mask"))->Get<framework::LoDTensor>();
    auto &in_true = scope.FindVar(Input("InTrue"))->Get<framework::LoDTensor>();
    auto &in_false =
        scope.FindVar(Input("InFalse"))->Get<framework::LoDTensor>();
    auto *out =
        scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
    auto level = static_cast<size_t>(Attr<int>("level"));

    PADDLE_ENFORCE_EQ(
        in_true.numel() || in_false.numel(), true,
        platform::errors::InvalidArgument(
            "At least one of Input(InTrue) and Input(InFalse) must be "
            "initialized."));

    auto &mask_dim = mask.dims();
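    // The mask is indexed on the host below, so ensure a CPU copy of it
    // exists before reading its data.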
    std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()};
    if (platform::is_cpu_place(mask.place())) {
      cpu_mask->ShareDataWith(mask);
    } else if (platform::is_gpu_place(mask.place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      framework::TensorCopy(mask, platform::CPUPlace(), dev_ctx,
                            cpu_mask.get());
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "This build does not support GPU. Please recompile or reinstall "
          "Paddle with CUDA support."));
#endif
    }
    auto *mask_data = cpu_mask->data<bool>();

    platform::Place place = dev_place;
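    // The merged batch stacks both branches along dim 0; the per-instance
    // shape is taken from whichever branch is initialized.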
    int64_t batch_size = in_true.dims()[0] + in_false.dims()[0];
    auto data_type = in_true.IsInitialized() ? in_true.type() : in_false.type();
    int rank;
    framework::DDim in_dims;
    if (in_true.IsInitialized()) {
      rank = in_true.dims().size();
      in_dims = framework::slice_ddim(in_true.dims(), 1, rank);
    } else {
      rank = in_false.dims().size();
      in_dims = framework::slice_ddim(in_false.dims(), 1, rank);
    }

    auto in_dim_vec = framework::vectorize(in_dims);
    in_dim_vec.insert(in_dim_vec.begin(), batch_size);

    framework::DDim out_dims = framework::make_ddim(in_dim_vec);
    out->Resize(out_dims);

    out->mutable_data(place, data_type);

    auto *out_lod = out->mutable_lod();
    out_lod->clear();
    size_t out_offset = 0;

    // Build LoDTensor `out`: walk the mask row by row, take the next
    // sequence from the branch the mask selects, append its sub-LoD, and
    // copy its data slice into `out`.

    size_t in_true_idx = 0;
    size_t in_false_idx = 0;
    for (size_t i = 0; i < static_cast<size_t>(mask_dim[0]); i++) {
      const framework::LoDTensor *input = nullptr;
      size_t *in_idx = nullptr;
      if (static_cast<int>(mask_data[i]) == 0) {
        input = &in_false;
        in_idx = &in_false_idx;
      } else {
        input = &in_true;
        in_idx = &in_true_idx;
      }
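      // Locate sequence `*in_idx` inside the selected branch: its sub-LoD
      // plus its absolute [start, end) row range at the bottom level.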
      auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset(
          input->lod(), *in_idx, (*in_idx) + 1, 0);
      auto &lod_length = lod_and_offset.first;

      pten::AppendLoD(out_lod, lod_length);

      size_t start_offset = lod_and_offset.second.first;
      size_t end_offset = lod_and_offset.second.second;

      PADDLE_ENFORCE_GE(end_offset, start_offset,
                        platform::errors::InvalidArgument(
                            "The end offset is less than the start offset; "
                            "end offset is %d, start offset is %d.",
                            end_offset, start_offset));
      size_t len = end_offset - start_offset;
      if (len == 0) {
        continue;
      }
      auto slice = out->Slice(out_offset, out_offset + len);
      framework::TensorCopy(input->Slice(start_offset, end_offset), place,
                            dev_ctx, &slice);
      out_offset += len;
      (*in_idx) += 1;
    }

    // Prepend the upper LoD levels taken from X so that `out` carries the
    // complete LoD hierarchy.
    for (size_t i = 0; i < level; i++) {
      out_lod->insert(out_lod->begin(), x.lod()[i]);
    }
  }

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &dev_place) const override {
    RunBase(scope, dev_place);
  }
};

class MergeLoDTensorInferOp : public MergeLoDTensorOp {
 public:
  MergeLoDTensorInferOp(const std::string &type,
                        const framework::VariableNameMap &inputs,
                        const framework::VariableNameMap &outputs,
                        const framework::AttributeMap &attrs)
      : MergeLoDTensorOp(type, inputs, outputs, attrs) {}

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &dev_place) const override {
    RunBase(scope, dev_place);
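    // The branch inputs have been merged into Out and are no longer needed;
    // clear them to release their memory, then recreate empty LoDTensors so
    // the variables remain well-defined.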
    framework::Variable *in_true_var = scope.FindVar(Input("InTrue"));
    framework::Variable *in_false_var = scope.FindVar(Input("InFalse"));
    in_true_var->Clear();
    in_false_var->Clear();
    in_true_var->GetMutable<framework::LoDTensor>();
    in_false_var->GetMutable<framework::LoDTensor>();
  }
};

class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input LoDTensor, contains complete lod information to "
             "construct the output");
    AddInput("Mask", "A bool column vector which mask the input");
    AddInput("InTrue", "The True branch to be merged");
    AddInput("InFalse", "The False branch to be merged");
    AddOutput("Out", "The merged output LoDTensor");
    AddAttr<int>("level", "(int) the specific lod level to rank.")
        .SetDefault(0)
        .EqualGreaterThan(0);
    AddComment(
        R"DOC(
        Merge the True and False branches of a LoDTensor into a single
        output, using a mask at a certain LoD level. X is used to obtain
        the complete LoD information. Please refer to SplitLoDTensorOp.)DOC");
  }
};
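
// A sketch of the Python-side pattern that drives this op, mirroring the
// IfElse snippet quoted in the error messages below (`cond` and `x` are
// placeholder variables):
//
//   ie = fluid.layers.IfElse(cond=cond)
//   with ie.true_block():
//       out_1 = ie.input(x)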

class MergeLoDTensorInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "merge_lod_tensor");
    OP_INOUT_CHECK(context->HasInput("Mask"), "Input", "Mask",
                   "merge_lod_tensor");
    OP_INOUT_CHECK(context->HasInput("InTrue"), "Input", "InTrue",
                   "merge_lod_tensor");
    OP_INOUT_CHECK(context->HasInput("InFalse"), "Input", "InFalse",
                   "merge_lod_tensor");
    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out",
                   "merge_lod_tensor");
    auto mask_dim = context->GetInputDim("Mask");
    PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
                      platform::errors::InvalidArgument(
                          "If you are using the IfElse OP:"
                          "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
                          "ie.true_block():\n    out_1 = ie.input(x)\n\n"
                          "Please ensure that cond is a 2-D tensor whose "
                          "second dimension has size 1. "
                          "But cond's shape is now [%s].\n",
                          mask_dim));
    if (context->IsRuntime() || mask_dim[1] > 0) {
      PADDLE_ENFORCE_EQ(mask_dim[1], 1,
                        platform::errors::InvalidArgument(
                            "If you are using the IfElse OP:"
                            "\n\nie = fluid.layers.IfElse(cond=cond)\nwith "
                            "ie.true_block():\n    out_1 = ie.input(x)\n\n"
                            "Please ensure that cond is a 2-D tensor whose "
                            "second dimension has size 1. "
                            "But cond's shape is now [%s].\n",
                            mask_dim));
    }

    context->SetOutputDim("Out", context->GetInputDim("InTrue"));
  }
};

template <typename T>
class MergeLoDTensorGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
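    // The gradient of merge is split: route the gradient of Out back into
    // the two branches using the same mask.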
    grad_op->SetType("split_lod_tensor");
    grad_op->SetInput("X", this->OutputGrad("Out"));
    grad_op->SetInput("Mask", this->Input("Mask"));
    grad_op->SetOutput("OutTrue", this->InputGrad("InTrue"));
    grad_op->SetOutput("OutFalse", this->InputGrad("InFalse"));
    grad_op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(merge_lod_tensor, ops::MergeLoDTensorOp,
                  ops::MergeLoDTensorOpProtoMaker,
                  ops::MergeLoDTensorInferShape,
                  ops::MergeLoDTensorGradMaker<paddle::framework::OpDesc>,
                  ops::MergeLoDTensorGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    merge_lod_tensor_infer, ops::MergeLoDTensorInferOp,
    ops::MergeLoDTensorOpProtoMaker, ops::MergeLoDTensorInferShape,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);