/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */
#include "paddle/framework/lod_rank_table.h"
#include "paddle/operators/array_operator.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {

class ShrinkRNNMemoryOp : public ArrayOp {
 public:
  ShrinkRNNMemoryOp(const std::string &type,
                    const framework::VariableNameMap &inputs,
                    const framework::VariableNameMap &outputs,
                    const framework::AttributeMap &attrs)
      : ArrayOp(type, inputs, outputs, attrs) {}

  void Run(const framework::Scope &scope,
           const platform::DeviceContext &dev_ctx) const override {
    auto *x_var = scope.FindVar(Input("X"));
    PADDLE_ENFORCE(x_var != nullptr, "Input X must be set");
    auto &x_tensor = x_var->Get<framework::LoDTensor>();
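    // The step index is read from input "I" by ArrayOp::GetOffset.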
    size_t offset = this->GetOffset(scope, dev_ctx);
    auto *rank_table_var = scope.FindVar(Input("RankTable"));
    PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set");
    auto &rank_table = rank_table_var->Get<framework::LoDRankTable>();

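    // Items in the rank table are sorted by sequence length in descending
    // order, so the number of rows to keep is the number of sequences whose
    // length is still greater than the current step offset.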
    auto &rank_items = rank_table.items();
    int dst_num_rows =
        std::lower_bound(rank_items.begin(), rank_items.end(), offset,
                         [](const framework::LoDRankTable::TableItem &a,
                            size_t b) { return a.length > b; }) -
        rank_items.begin();

    auto *out_var = scope.FindVar(Output("Out"));
    PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set");
    auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
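    // Share the leading dst_num_rows rows of X with Out rather than copying
    // them; if every sequence has already finished, Out is left empty.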
    if (dst_num_rows != 0) {
      out_tensor.ShareDataWith(x_tensor.Slice(0, dst_num_rows));
    }
  }
};

class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ShrinkRNNMemoryOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "(LoDTensor) The RNN step memory to be shrinked.");
    AddInput("RankTable", "(LoDRankTable) The lod_rank_table of dynamic RNN.");
    AddInput("I",
             "(LoDTensor) The step index. The RNN step memory 'X' will be "
             "shrinked to match the size of the input of the index'th step.");
    AddOutput("Out", "(LoDTensor) The shrinked RNN step memory.");
    AddComment(
        R"DOC(
        In a dynamic RNN, we are able to handle sequences of different
        lengths. Because of the multiple lengths, the size of each step
        input can be different, which may lead to a mismatch between the
        input of the current step and the memory generated by the previous
        step. This operator shrinks the memory according to the size of the
        next step input, to make sure that they can match each other.
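
        For example, if the lod_rank_table records three sequences of lengths
        3, 2 and 1, then at step index 2 only the first sequence is still
        active, so a three-row memory is shrunk to its first row.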
        )DOC");
  }
};

class ShrinkRNNMemoryInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    PADDLE_ENFORCE(context->HasInput("X"));
    PADDLE_ENFORCE(context->HasInput("I"));
    PADDLE_ENFORCE(context->HasInput("RankTable"));
    context->SetOutputDim("Out", context->GetInputDim("X"));
  }
};

class ShrinkRNNMemoryGradOp : public ArrayOp {
 public:
  ShrinkRNNMemoryGradOp(const std::string &type,
                        const framework::VariableNameMap &inputs,
                        const framework::VariableNameMap &outputs,
                        const framework::AttributeMap &attrs)
      : ArrayOp(type, inputs, outputs, attrs) {}

  void Run(const framework::Scope &scope,
           const platform::DeviceContext &dev_ctx) const override {
    auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out")));
    auto *dx_var = scope.FindVar(Output(framework::GradVarName("X")));
    PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr");
    auto *x_var = scope.FindVar(Input("X"));
    PADDLE_ENFORCE(x_var != nullptr);

    auto &x_tensor = x_var->Get<framework::LoDTensor>();
    auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>();
    dx_tensor.Resize(x_tensor.dims());
    dx_tensor.mutable_data(x_tensor.place(), x_tensor.type());

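    // dx keeps the full shape of X. If Out@GRAD is absent, dx is all zero;
    // otherwise its leading rows receive Out@GRAD and the remaining rows,
    // which belong to sequences that had already finished, are zeroed.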
    if (dout_var == nullptr) {  // dx_tensor fill zero
      math::set_constant(dev_ctx, &dx_tensor, 0.0f);
    } else {
      auto &dout_tensor = dout_var->Get<framework::LoDTensor>();
      auto height = dout_tensor.dims()[0];
      auto slice = dx_tensor.Slice(0, static_cast<int>(height));
      framework::CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx, &slice);
      if (dx_tensor.dims()[0] < height) {
Y
Yang Yu 已提交
116 117 118 119 120 121 122 123
        auto rest_tensor = dx_tensor.Slice(
            static_cast<int>(height), static_cast<int>(dout_tensor.dims()[0]));
        math::set_constant(dev_ctx, &rest_tensor, 0.0f);
      }
    }
  }
};

class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    PADDLE_ENFORCE(context->HasInput("X"));
    PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
    context->SetOutputDim(framework::GradVarName("X"),
                          context->GetInputDim("X"));
  }
};

class ShrinkRNNGradOpMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto *op = new framework::OpDesc();
    op->SetType("shrink_rnn_memory_grad");
    op->SetInput("X", Input("X"));
    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    op->SetAttrMap(Attrs());
    return std::unique_ptr<framework::OpDesc>(op);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(shrink_rnn_memory, ops::ShrinkRNNMemoryOp,
                  ops::ShrinkRNNMemoryInferShape,
                  ops::ShrinkRNNMemoryOpProtoMaker, ops::ShrinkRNNGradOpMaker);
REGISTER_OPERATOR(shrink_rnn_memory_grad, ops::ShrinkRNNMemoryGradOp,
                  ops::ShrinkRNNMemoryGradInferShape);