/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "paddle/operators/recurrent_network_op.h"

#include <glog/logging.h>
#include <cstring>
#include <sstream>

#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/platform/enforce.h"

namespace paddle {
namespace operators {

namespace rnn {

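// SegmentInputs splits each external inlink tensor, whose leading dimension
// is the sequence length, into per-step tensors: an inlink of shape
// [seq_len, batch, dim] becomes seq_len slices of shape [batch, dim], stored
// in each step scope under the internal alias name.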
void SegmentInputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
                   const std::vector<Link>& inlinks,
                   const size_t seq_len) {
  PADDLE_ENFORCE(!inlinks.empty(), "no inlinks are provided.");
  for (size_t i = 0; i < inlinks.size(); ++i) {
    Tensor* input =
        step_scopes[0]->FindVar(inlinks[i].external)->GetMutable<Tensor>();
    DDim dims = input->dims();
    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
                   "all the inlinks must have the same length");
    DDim step_dims = slice_ddim(dims, 1, dims.size());
    for (size_t j = 0; j < seq_len; j++) {
      Tensor* step_input =
          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
      *step_input = input->Slice<float>(j, j + 1);
      step_input->Resize(step_dims);
    }
  }
}

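// ConcatOutputs is the inverse of SegmentInputs: it copies each step scope's
// internal output tensor into the corresponding slice of the external
// outlink tensor, whose leading dimension is the sequence length.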
void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
                   const std::vector<Link>& outlinks,
                   const size_t seq_len) {
  for (size_t i = 0; i < outlinks.size(); i++) {
    Tensor* output =
        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();

    // TODO(qingqing) remove the following code after adding
    // InferShape in RecurrentGradientOp
    DDim step_dims = step_scopes[0]
                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    dims_vec.insert(dims_vec.begin(), seq_len);
    output->mutable_data<float>(make_ddim(dims_vec), platform::CPUPlace());

    for (size_t j = 0; j < seq_len; j++) {
      Tensor* step_output =
          step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable<Tensor>();
      // TODO(luotao02) data type and platform::DeviceContext() should be set
      // correctly
      (output->Slice<float>(j, j + 1))
          .CopyFrom<float>(*step_output, platform::CPUPlace());
    }
  }
}

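// LinkMemories makes the `pre_var` memory of step `step_id` share data with
// the `var` memory of the step at `step_id + offset`. The forward pass links
// with offset -1 (read the previous step's state); the backward pass links
// with offset +1 (receive the gradient from the following step).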
void LinkMemories(std::vector<std::shared_ptr<Scope>>& scopes,
                  const std::vector<rnn::MemoryAttr>& memories,
                  size_t step_id,
                  int offset) {
  PADDLE_ENFORCE(step_id < scopes.size(),
                 "step [%d] is out of range of step scopes' size [%d]",
                 step_id,
                 scopes.size());
  PADDLE_ENFORCE(static_cast<int>(step_id) + offset >= 0,
                 "offset [%d] must be larger than -[%d]",
                 offset,
                 step_id);
  PADDLE_ENFORCE(step_id + offset < scopes.size(),
                 "offset [%d] is out of range, it must be less than (%d - %d)",
                 offset,
                 scopes.size(),
                 step_id);
  std::shared_ptr<Scope> scope = scopes[step_id];
  std::shared_ptr<Scope> linked_scope = scopes[step_id + offset];
  for (auto& attr : memories) {
    auto mem = scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
    // maybe sharing the variable is better?
    auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<Tensor>();
    mem->ShareDataWith<float>(*linked_mem);

    // TODO(qingqing) remove the following code; the memory of the current
    // step should be allocated in the step net. For unit tests, as addOp and
    // mulOp are currently null, mem.data() in the output errors out unless
    // mutable_data is called; we will remove this after merging the correct
    // addOp and mulOp.
    auto m = scope->NewVar(attr.var)->GetMutable<Tensor>();
    m->mutable_data<float>(mem->dims(), platform::CPUPlace());
  }
}

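// InitArgument gathers the op's inputs, outputs, and attributes into an
// rnn::Argument: inlinks/outlinks are paired with their alias attributes to
// form Links, and memories, pre_memories, and boot_memories are zipped into
// MemoryAttr triples. All three memory lists must be the same size.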
void InitArgument(const ArgumentName& name,
                  Argument* arg,
                  const OperatorBase& op) {
  arg->step_net = op.Input(name.step_net);
  arg->step_scopes = op.Output(name.step_scopes);

  auto inlinks = op.Inputs(name.inlinks);
  auto inlink_alias = op.GetAttr<std::vector<std::string>>(name.inlink_alias);
  PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
                 "the sizes of inlinks and inlink_alias don't match: %d vs %d",
                 inlinks.size(),
                 inlink_alias.size());
  for (size_t i = 0; i < inlinks.size(); ++i) {
    rnn::Link link;
    link.external = inlinks[i];
    link.internal = inlink_alias[i];
    (arg->inlinks).push_back(link);
  }

  auto outlinks = op.Outputs(name.outlinks);
  auto outlink_alias = op.GetAttr<std::vector<std::string>>(name.outlink_alias);
  PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
                 "the sizes of outlinks and outlink_alias don't match: %d vs %d",
                 outlinks.size(),
                 outlink_alias.size());
  for (size_t i = 0; i < outlinks.size(); ++i) {
    rnn::Link link;
    link.external = outlinks[i];
    link.internal = outlink_alias[i];
    (arg->outlinks).push_back(link);
  }

  auto boot_memories = op.Inputs(name.boot_memories);

  // attributes
  auto memories = op.GetAttr<std::vector<std::string>>(name.memories);
  auto pre_memories = op.GetAttr<std::vector<std::string>>(name.pre_memories);

  PADDLE_ENFORCE(memories.size() == boot_memories.size(),
                 "the sizes of memories and boot_memories don't match: %d vs %d",
                 memories.size(),
                 boot_memories.size());
  PADDLE_ENFORCE(pre_memories.size() == boot_memories.size(),
                 "the sizes of pre_memories and boot_memories don't match: %d vs %d",
                 pre_memories.size(),
                 boot_memories.size());
  PADDLE_ENFORCE(memories.size() > 0, "at least one memory should be set");

  for (size_t i = 0; i < memories.size(); ++i) {
    rnn::MemoryAttr mem_attr;
    mem_attr.var = memories[i];
    mem_attr.pre_var = pre_memories[i];
    mem_attr.boot_var = boot_memories[i];
    (arg->memories).push_back(mem_attr);
  }
}

}  // namespace rnn

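// InferShape reads the sequence length from the first inlink's leading
// dimension, creates one scope per step, segments the inlinks, initializes
// the memories of step 0, and then runs the step net's InferShape for every
// step so that the external outputs can be resized below.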
void RecurrentAlgorithm::InferShape(const std::shared_ptr<Scope>& scope) const {
  seq_len_ = scope->FindVar((arg_->inlinks[0]).external)
                 ->GetMutable<Tensor>()
                 ->dims()[0];
  CreateScopes(scope);
  auto step_scopes = GetStepScopes(scope);

  // SegmentInputs is called in InferShape, so the inputs must already hold
  // memory here, but other ops only set their output dimensions in
  // InferShape. That's a problem: does the RNN op need InferShape at all?
  // Do the following functions (SegmentInputs, InitMemories, ...) need to
  // be rewritten for the RNN op?
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);

  InitMemories(step_scopes[0]);

  PADDLE_ENFORCE(scope->FindVar(arg_->step_net),
                 "step net [%s] is not in scope.",
                 arg_->step_net);
  Variable* net = scope->FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");
  // If InferShape is called in OperatorBase's Run function, the RNN op only
  // needs to do InferShape for the first time step.
  for (size_t i = 0; i < seq_len_; i++) {
    if (i > 0) {
      rnn::LinkMemories(step_scopes, arg_->memories, i, -1);
    }
    net->GetMutable<NetOp>()->InferShape(step_scopes[i]);
  }

  auto outlinks = arg_->outlinks;
  for (size_t i = 0; i < outlinks.size(); i++) {
    DDim step_dims = step_scopes[0]
                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    // only fixed-length sequences are supported for now
    dims_vec.insert(dims_vec.begin(), seq_len_);
    Tensor* output =
        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    output->Resize(make_ddim(dims_vec));
  }
}

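// Run executes the step net once per time step, after linking each step's
// pre-memories to the previous step's memories, and finally concatenates
// the per-step outputs into the external outlinks.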
void RecurrentAlgorithm::Run(const std::shared_ptr<Scope>& scope,
                             const platform::DeviceContext& dev_ctx) const {
  auto step_scopes = GetStepScopes(scope);

  Variable* net = scope->FindVar(arg_->step_net);
  for (size_t step_id = 0; step_id < seq_len_; step_id++) {
    // memory linking is done in InferShape;
    // maybe remove the following code after testing
    if (step_id > 0) {
      rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1);
    }
    net->GetMutable<NetOp>()->Run(step_scopes[step_id], dev_ctx);
  }

  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
}

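// CreateScopes lazily grows the step-scope vector to seq_len_ entries and
// pre-creates the step net's input and output variables in each new scope;
// scopes created earlier are reused.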
void RecurrentAlgorithm::CreateScopes(std::shared_ptr<Scope> scope) const {
  // TODO(xxx) Only two scopes are needed for inference; that case will be
  // supported later.
  auto step_scopes = scope->FindVar(arg_->step_scopes)
                         ->GetMutable<std::vector<std::shared_ptr<Scope>>>();

  if (seq_len_ > step_scopes->size()) {
    for (size_t i = step_scopes->size(); i < seq_len_; ++i) {
      std::shared_ptr<Scope> step_scope = std::make_shared<Scope>(scope);

      // For now, all variables in the scope must be created outside of the op.
      auto net_op = scope->FindVar(arg_->step_net)->GetMutable<NetOp>();
      for (auto& input : net_op->inputs_) {
        step_scope->NewVar(input);
      }
      for (auto& output : net_op->outputs_) {
        step_scope->NewVar(output);
      }
      }

      step_scopes->push_back(std::make_shared<Scope>(step_scope));
    }
  }
}

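// InitMemories makes each memory's pre_var in the first step scope share
// data with its boot variable, so step 0 reads its "previous" state from
// the boot memory.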
void RecurrentAlgorithm::InitMemories(std::shared_ptr<Scope> step_scope) const {
  for (auto& attr : arg_->memories) {
    Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable<Tensor>();
    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var),
                   "memory [%s]'s boot variable [%s] does not exist",
                   attr.var,
                   attr.boot_var);
    Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable<Tensor>();
    pre_mem->ShareDataWith<float>(*boot_mem);

    // TODO(qingqing) remove the following code; the memory of the current
    // step should be allocated in the step net. This is here for unit tests.
    auto cur_step_mem = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
    cur_step_mem->mutable_data<float>(boot_mem->dims(), platform::CPUPlace());
  }
}

const rnn::ArgumentName RecurrentOp::kArgName{"step_net",
                                              "step_scopes",
                                              "inlinks",
                                              "outlinks",
                                              "inlink_alias",
                                              "outlink_alias",
                                              "memories",
                                              "pre_memories",
                                              "boot_memories"};

const rnn::ArgumentName RecurrentGradientOp::kArgName{"step_net",
                                                      "step_scopes",
                                                      "outlink@grad",
                                                      "inlink@grad",
                                                      "inlink_alias",
                                                      "outlink_alias",
                                                      "memories",
                                                      "pre_memories",
                                                      "boot_memories@grad"};

void RecurrentOp::Init() {
  OperatorBase::Init();
  std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
  rnn::InitArgument(kArgName, arg.get(), *this);
  alg_.Init(std::move(arg));
}

class RecurrentAlgorithmProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
public:
  RecurrentAlgorithmProtoAndCheckerMaker(OpProto* proto,
                                         OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    const auto& name = RecurrentOp::kArgName;
    // inputs and outputs stored in proto
    AddInputs(name.inlinks,
              "the inputs that need to be segmented for each step.");
    AddInputs(name.boot_memories, "variables to initialize memories.");
    AddInput(name.step_net, "network shared by all steps.");

    AddOutputs(name.outlinks,
               "the outputs that need to be concatenated over all steps.");
    AddOutput(name.step_scopes, "step scopes");

    // Attributes stored in AttributeMap
    AddAttr<std::vector<std::string>>(name.inlink_alias, "alias of inlinks");
    AddAttr<std::vector<std::string>>(name.outlink_alias, "alias of outlinks");
    AddAttr<std::vector<std::string>>(name.pre_memories,
                                      "names of pre-memories");
    AddAttr<std::vector<std::string>>(name.memories, "names of memories");

    AddComment("This is a recurrent group operator.");
  }
};

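// The backward Run mirrors the forward one but walks the steps in reverse,
// linking memories with offset +1 so each step sees the gradient produced
// by the step after it, then links the boot-memory gradients and
// concatenates the per-step outlink gradients.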
void RecurrentGradientAlgorithm::Run(
    const std::shared_ptr<Scope>& scope,
    const platform::DeviceContext& dev_ctx) const {
  auto step_scopes = GetStepScopes(scope);
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);
  PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope.");
  Variable* net = scope->FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");
  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1);
    }
    net->GetMutable<NetOp>()->Run(step_scopes[step_id], dev_ctx);
  }
  LinkBootMemoryGradients(step_scopes[0]);
  rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_);
}

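// LinkBootMemoryGradients makes each boot variable's gradient in the first
// step scope share data with the corresponding memory gradient of step 0,
// the counterpart of InitMemories in the forward pass.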
void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
    std::shared_ptr<Scope> step_scope) const {
  for (auto& attr : arg_->memories) {
    Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable<Tensor>();
    PADDLE_ENFORCE(mem_grad != nullptr,
                   "boot_tensor should be retrieved beforehand");
    PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var),
                   "memory [%s]'s boot variable [%s] does not exist",
                   attr.var,
                   attr.boot_var);
    Tensor* boot_mem_grad =
        step_scope->NewVar(attr.boot_var)->GetMutable<Tensor>();
    boot_mem_grad->ShareDataWith<float>(*mem_grad);
  }
}

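// The gradient InferShape mirrors the forward one: it derives seq_len_ from
// the first inlink (an outlink gradient here), segments it across the step
// scopes, runs the step net's InferShape in reverse, and resizes the
// external output tensors.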
void RecurrentGradientAlgorithm::InferShape(
    const std::shared_ptr<Scope>& scope) const {
  seq_len_ = scope->FindVar((arg_->inlinks[0]).external)
                 ->GetMutable<Tensor>()
                 ->dims()[0];
  auto step_scopes = GetStepScopes(scope);
  rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_);

  PADDLE_ENFORCE(scope->FindVar(arg_->step_net), "step net is not in scope.");
  Variable* net = scope->FindVar(arg_->step_net);
  PADDLE_ENFORCE(net != nullptr, "failed to get step net");

  for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) {
    if (static_cast<size_t>(step_id) != seq_len_ - 1) {
      rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1);
    }
    net->GetMutable<NetOp>()->InferShape(step_scopes[step_id]);
  }

  auto outlinks = arg_->outlinks;
  for (size_t i = 0; i < outlinks.size(); i++) {
    DDim step_dims = step_scopes[0]
                         ->FindVar(outlinks[i].internal)
                         ->GetMutable<Tensor>()
                         ->dims();
    std::vector<int> dims_vec = vectorize(step_dims);
    // only fixed-length sequences are supported for now
    dims_vec.insert(dims_vec.begin(), seq_len_);
    Tensor* output =
        step_scopes[0]->FindVar(outlinks[i].external)->GetMutable<Tensor>();
    output->Resize(make_ddim(dims_vec));
  }
  LinkBootMemoryGradients(step_scopes[0]);
}

void RecurrentGradientOp::Init() {
  OperatorBase::Init();
  std::unique_ptr<rnn::Argument> arg(new rnn::Argument());
  rnn::InitArgument(kArgName, arg.get(), *this);
  alg_.Init(std::move(arg));
}

}  // namespace operators
}  // namespace paddle

REGISTER_OP(recurrent_op,
            paddle::operators::RecurrentOp,
            paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);