// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/layer.h"

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/imperative/infer_var_type_context.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/imperative/prepared_operator.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace imperative {

using framework::Variable;
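
// ThreadSafeNameSet guards a set of names with a mutex so that VarBase
// construction and destruction on different threads can safely record
// which variables are still alive (see VarBase::AliveVarNames below).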
void ThreadSafeNameSet::Insert(const std::string& name) {
  std::lock_guard<std::mutex> guard(mtx_);
  set_.insert(name);
}

void ThreadSafeNameSet::Remove(const std::string& name) {
  std::lock_guard<std::mutex> guard(mtx_);
  auto iter = set_.find(name);
  PADDLE_ENFORCE_EQ(
      iter != set_.end(), true,
      platform::errors::NotFound("Variable name %s does not exist", name));
  set_.erase(iter);
}

std::vector<std::string> ThreadSafeNameSet::Names() const {
  std::lock_guard<std::mutex> guard(mtx_);
  return std::vector<std::string>(set_.begin(), set_.end());
}

ThreadSafeNameSet VarBase::name_set_;

std::vector<std::string> VarBase::AliveVarNames() { return name_set_.Names(); }

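// PrepareRuntimeContext flattens the name -> VarBase maps of inputs and
// outputs into the framework::VariableValueMap form expected by the
// static-graph operator interface.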
static framework::RuntimeContext PrepareRuntimeContext(
    const NameVarBaseMap& ins, const NameVarBaseMap& outs) {
  framework::VariableValueMap inputs, outputs;
  for (auto& in_pair : ins) {
    auto& in_ctx = inputs[in_pair.first];
    in_ctx.reserve(in_pair.second.size());
    for (auto& in_var : in_pair.second) {
      in_ctx.emplace_back(in_var->MutableVar());
    }
  }

  for (auto& out_pair : outs) {
    auto& out_ctx = outputs[out_pair.first];
    out_ctx.reserve(out_pair.second.size());
    for (auto& out_var : out_pair.second) {
      out_ctx.emplace_back(out_var->MutableVar());
    }
  }
  return framework::RuntimeContext(std::move(inputs), std::move(outputs));
}

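// DebugString renders one input/output slot as "name{var[...], ...}",
// printing dtype, place and dims for initialized LoDTensor/SelectedRows
// values, and sentinels such as NULL or NOT_INITED_VAR otherwise.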
template <typename VarType>
static std::string DebugString(
    const std::string& name,
    const std::vector<std::shared_ptr<VarType>>& vars) {
  std::stringstream ss;
  ss << name << "{";

  for (size_t i = 0; i < vars.size(); ++i) {
    if (i > 0) ss << ", ";

    if (vars[i] == nullptr) {
      ss << "NULL";
      continue;
    }
    ss << vars[i]->Name() << "[";
    const framework::Variable& var = vars[i]->Var();
    if (!var.IsInitialized()) {
      ss << "NOT_INITED_VAR";
    } else if (var.IsType<framework::LoDTensor>()) {
      auto& tensor = var.Get<framework::LoDTensor>();
      ss << "LoDTensor<";
      if (tensor.IsInitialized()) {
        ss << framework::DataTypeToString(tensor.type()) << ", ";
        ss << tensor.place() << ", ";
        ss << "(" << tensor.dims() << ")";
      } else {
        ss << "NOT_INITED";
      }
      ss << ">";
    } else if (var.IsType<framework::SelectedRows>()) {
      ss << "SelectedRows<";
      auto& selected_rows = var.Get<framework::SelectedRows>();
      auto& tensor = selected_rows.value();
      auto& rows = selected_rows.rows();
      if (tensor.IsInitialized()) {
        ss << framework::DataTypeToString(tensor.type()) << ", ";
        ss << tensor.place() << ", ";
        ss << "height(" << selected_rows.height() << "), rows(";
        std::for_each(rows.cbegin(), rows.cend(),
                      [&ss](const int64_t r) { ss << r << " "; });
        ss << "), dims(" << tensor.dims() << ")";
      } else {
        ss << "NOT_INITED";
      }
      ss << ">";
    } else {
      ss << "UNRESOLVED_TYPE";
    }
    ss << "]";
  }

  ss << "}";
  return ss.str();
}

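// LayerDebugStringImpl joins the per-slot strings produced by DebugString
// into a single "Op(type): Inputs: ..., Outputs: ..." line for logging.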
template <typename VarType>
static std::string LayerDebugStringImpl(const std::string& op_type,
                                        const NameVarMap<VarType>& ins,
                                        const NameVarMap<VarType>& outs) {
  std::stringstream ss;
  ss << "Op(" << op_type << "): ";

  ss << "Inputs: ";

  size_t i = 0;
  for (auto& pair : ins) {
    if (i > 0) ss << ", ";
    ss << DebugString<VarType>(pair.first, pair.second);
    ++i;
  }

  ss << ",   Outputs: ";
  i = 0;
  for (auto& pair : outs) {
    if (i > 0) ss << ", ";
    ss << DebugString<VarType>(pair.first, pair.second);
    ++i;
  }
  return ss.str();
}

std::string LayerDebugString(const std::string& op_type,
                             const NameVarMap<VarBase>& ins,
                             const NameVarMap<VarBase>& outs) {
  return LayerDebugStringImpl<VarBase>(op_type, ins, outs);
}

std::string LayerDebugString(const std::string& op_type,
                             const NameVarMap<VariableWrapper>& ins,
                             const NameVarMap<VariableWrapper>& outs) {
  return LayerDebugStringImpl<VariableWrapper>(op_type, ins, outs);
}

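// A VarBase is a thin handle over a VariableWrapper: it adopts the
// wrapper's grad node and, if the wrapper already has a grad variable,
// wraps that too, so both handles always refer to the same storage.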
VarBase::VarBase(const std::shared_ptr<VariableWrapper>& var)
    : var_(var), grad_node_(var->GetGradNode()) {
  if (auto grad_var = var_->GetGradVar()) {
    grad_var_ = std::make_shared<VarBase>(grad_var);
  }

  if (IsDebugEnabled()) {
    VLOG(10) << "Construct VarBase: " << Name();
    name_set_.Insert(Name());
  }
}

size_t VarBase::GradOpNum() const {
  return grad_node_ ? grad_node_->size() : 0;
}

void VarBase::ClearGradient() {
  VLOG(4) << "ClearGradient " << Name();
  if (grad_var_) {
    if (grad_var_->Var().IsType<framework::SelectedRows>()) {
      auto* grad_t =
          grad_var_->MutableVar()->GetMutable<framework::SelectedRows>();
      if (grad_t->mutable_value()->IsInitialized()) {
#ifdef PADDLE_WITH_MKLDNN
        if (FLAGS_use_mkldnn) ClearMKLDNNCache(grad_t->place());
#endif
        grad_t->mutable_rows()->clear();
        grad_t->mutable_value()->clear();
      }
    } else {
      platform::RecordEvent record_event("ClearGradient");
      auto* grad_t =
          grad_var_->MutableVar()->GetMutable<framework::LoDTensor>();
      if (grad_t->IsInitialized()) {
        auto* dev_ctx =
            platform::DeviceContextPool::Instance().Get(grad_t->place());
        operators::math::set_constant(*dev_ctx, grad_t, 0.0);
#ifdef PADDLE_WITH_MKLDNN
        if (FLAGS_use_mkldnn) ClearMKLDNNCache(grad_t->place());
#endif
      }
    }
    // TODO(zhouwei): It would be better to free the gradient's memory via
    // grad_t->clear(), but that currently breaks the yolov3 model on Mac
    // CPU and the cause is unknown. Once that bug is fixed, the
    // SetIsEmpty() call below is no longer needed.
    grad_var_->SharedVar()->SetIsEmpty(true);
  }
}

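// NewVarBase deep-copies this variable's LoDTensor or SelectedRows to
// `dst_place` and returns the copy in a fresh VarBase. With `blocking`
// set, it waits on the destination (and, for cross-place copies, the
// source) device context so the data is ready on return. A hypothetical
// caller copying a variable onto the first GPU might write:
//   auto gpu_var = var->NewVarBase(platform::CUDAPlace(0), /*blocking=*/true);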
std::shared_ptr<VarBase> VarBase::NewVarBase(const platform::Place& dst_place,
                                             const bool blocking) const {
  PADDLE_ENFORCE_EQ(
      Var().IsInitialized() && (Var().IsType<framework::LoDTensor>() ||
                                Var().IsType<framework::SelectedRows>()),
      true, platform::errors::InvalidArgument(
                "Variable is not initialized or Variable's type is not "
                "LoDTensor or SelectedRows when getting numpy tensor"));

  if (Var().IsType<framework::LoDTensor>()) {
    auto& src_tensor = Var().Get<framework::LoDTensor>();
    // TODO(Jiabin): change this after move unique_name generator to CXX
    auto new_var = std::make_shared<VarBase>(
        true, Name() + std::to_string(copied_counter_++));

    auto* dst_tensor =
        new_var->MutableVar()->GetMutable<framework::LoDTensor>();
    dst_tensor->set_lod(src_tensor.lod());
    new_var->SetPersistable(Persistable());
    new_var->SetDataType(DataType());
    new_var->SetType(Type());
    framework::TensorCopy(src_tensor, dst_place, dst_tensor);
    if (blocking) {
      platform::DeviceContextPool::Instance().Get(dst_place)->Wait();
      auto src_place = src_tensor.place();
      if (!(src_place == dst_place)) {
        platform::DeviceContextPool::Instance().Get(src_place)->Wait();
      }
    }
    VLOG(4) << "copy tensor " << Name() << " from " << Place() << " to "
            << dst_place;
    return new_var;
  } else {
    auto& src_selected_rows = Var().Get<framework::SelectedRows>();
    auto new_var = std::make_shared<VarBase>(
        false, "Itmp" + std::to_string(copied_counter_++));
    new_var->SetType(framework::proto::VarType::SELECTED_ROWS);
    auto* dst_selected_rows =
        new_var->MutableVar()->GetMutable<framework::SelectedRows>();

    framework::TensorCopy(src_selected_rows.value(), dst_place,
                          dst_selected_rows->mutable_value());
    if (blocking) {
      platform::DeviceContextPool::Instance().Get(dst_place)->Wait();
      auto src_place = src_selected_rows.place();
      if (!(src_place == dst_place)) {
        platform::DeviceContextPool::Instance().Get(src_place)->Wait();
      }
    }
    dst_selected_rows->set_height(src_selected_rows.height());
    dst_selected_rows->set_rows(src_selected_rows.rows());
    VLOG(4) << "copy tensor " << Name() << " from " << Place() << " to "
            << dst_place;
    return new_var;
  }
}

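// CopyFrom deep-copies `src` into this VarBase, but only while this
// variable is still empty: it mirrors src's metadata (persistable, dtype,
// type, stop_gradient) and then copies the underlying LoDTensor or
// SelectedRows, optionally blocking until the copy has finished.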
void VarBase::CopyFrom(const VarBase& src, const bool blocking) {
  if (SharedVar()->IsEmpty()) {
    VLOG(3) << "deep copy Variable from " << src.Name() << " to " << Name();
    SetPersistable(src.Persistable());
    SetDataType(src.DataType());
    SetType(src.Type());
    SetOverridedStopGradient(src.OverridedStopGradient());
    if (!src.SharedVar()->IsEmpty()) {
      const platform::Place& place = src.Place();
      if (src.Var().IsType<framework::LoDTensor>()) {
        auto& src_tensor = src.Var().Get<framework::LoDTensor>();
        auto* dst_tensor = MutableVar()->GetMutable<framework::LoDTensor>();
        dst_tensor->set_lod(src_tensor.lod());
        framework::TensorCopy(src_tensor, place, dst_tensor);
      } else if (src.Var().IsType<framework::SelectedRows>()) {
        auto& src_selected_rows = src.Var().Get<framework::SelectedRows>();
        auto* dst_selected_rows =
            MutableVar()->GetMutable<framework::SelectedRows>();
        dst_selected_rows->set_height(src_selected_rows.height());
        dst_selected_rows->set_rows(src_selected_rows.rows());
        framework::TensorCopy(src_selected_rows.value(), place,
                              dst_selected_rows->mutable_value());
      }
      if (blocking) {
        platform::DeviceContextPool::Instance().Get(place)->Wait();
      }
    }
  }
}

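// BumpInplaceVersion increments the inplace version counter of the
// underlying variable; the autograd machinery uses this counter to detect
// that an inplace op has overwritten a tensor the backward pass needs.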
void VarBase::BumpInplaceVersion() {
  PADDLE_ENFORCE_EQ(
      Var().IsInitialized(), true,
      platform::errors::InvalidArgument(
          "Tensor %s has not been initialized, please check if it has no data.",
          Name()));
  MutableVar()->BumpInplaceVersion();
}

void OpBase::SetType(const std::string& type) {
  op_ = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
}

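// ClearBackwardTrace drops the recorded input and output references once
// the backward op no longer needs them, releasing the variables they
// keep alive.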
void OpBase::ClearBackwardTrace() {
  ins_.clear();
  outs_.clear();
}

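// OpBaseRunImpl is the shared forward-execution path for VarBase and
// VariableWrapper graphs: infer output var types, initialize the output
// variables, prepare the kernel for `place`, run it (possibly on
// transformed copies of the inputs, see the note below), and finally
// record the forward data type on the outputs' grad vars.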
template <typename VarType>
static void OpBaseRunImpl(const framework::OperatorBase& op,
                          const NameVarMap<VarType>& ins,
                          const NameVarMap<VarType>& outs,
                          const framework::AttributeMap& attrs,
                          const framework::AttributeMap& default_attrs,
                          const platform::Place& place) {
  auto* op_kernel = dynamic_cast<const framework::OperatorWithKernel*>(&op);
  PADDLE_ENFORCE_NOT_NULL(
      op_kernel, platform::errors::PermissionDenied(
                     "Only support operator with kernel in Dygraph mode."));
  auto& info = op.Info();
  if (info.infer_var_type_) {
    RuntimeInferVarTypeContext<VarType> infer_var_type_ctx(ins, outs, attrs,
                                                           default_attrs);
    info.infer_var_type_(&infer_var_type_ctx);
  }

  // Initialize output var type
  for (auto& var_pair : outs) {
    for (auto& var : var_pair.second) {
      if (var) {
        InitializeVariable(var->MutableVar(), var->Type());
      }
    }
  }

  VLOG(5) << LayerDebugString(op.Type(), ins, outs);

  /**
   * [ Why need temporary inputs here? ]
   *
   * PrepareData should not change original input tensor inplace.
   * Suppose the user defines a tensor(int), enters an op to execute,
   * and then this op rewrites GetExpectedKernelForVar, and converts
   * this tensor to float type during execution. After the dynamic
   * graph is executed, the user-defined variable will be lost, and
   * the user cannot get the originally defined int tensor, because
   * it has been converted to float, this should be regarded as a bug
   * in certain usage scenarios
   *
   * In static graph mode, when op is executed, a temporary scope
   * `transfer_scope` is created before PrepareData, the data after
   * transform is stored in the temporary scope, and then discarded
   * after the execution of op, but the original input is directly
   * overwritten in the previous dynamic graph implementation.
   */
  auto prepared_op =
      PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs, default_attrs);
  auto tmp_ins_ptr =
      PrepareData<VarType>(*op_kernel, ins, prepared_op.kernel_type());
  if (tmp_ins_ptr == nullptr) {
    prepared_op.Run(ins, outs, attrs, default_attrs);
  } else {
    prepared_op.Run(*tmp_ins_ptr, outs, attrs, default_attrs);
  }

  VLOG(4) << LayerDebugString(op.Type(), ins, outs);

  // Set the forward data type on the grad vars of the outputs.
  for (auto& var_pair : outs) {
    for (auto& var : var_pair.second) {
      // NOTE(zhiqiu): The output may be NULL because of pruning.
      if (var) {
        SetForwardDataTypeOfGradVar(var);
      }
    }
  }
}

void OpBase::Run(const framework::OperatorBase& op,
                 const NameVarMap<VarBase>& ins,
                 const NameVarMap<VarBase>& outs,
                 const framework::AttributeMap& attrs,
                 const framework::AttributeMap& default_attrs,
                 const platform::Place& place) {
  OpBaseRunImpl<VarBase>(op, ins, outs, attrs, default_attrs, place);
}

void OpBase::Run(const framework::OperatorBase& op,
                 const NameVarMap<VariableWrapper>& ins,
                 const NameVarMap<VariableWrapper>& outs,
                 const framework::AttributeMap& attrs,
                 const framework::AttributeMap& default_attrs,
                 const platform::Place& place) {
  OpBaseRunImpl<VariableWrapper>(op, ins, outs, attrs, default_attrs, place);
}

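// ClearNoNeedBufferInputs frees input buffers that the backward kernel
// never reads: each such input is swapped for a placeholder
// VariableWrapper that keeps only dims and lod, so memory is released
// while shape inference still works.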
void ClearNoNeedBufferInputs(OpBase* op) {
  auto& inferer = op->Info().NoNeedBufferVarsInferer();
  if (!inferer) return;
  auto* ins = op->GetMutableInsMap();
  const auto& no_need_buffer_slots =
      inferer(*ins, op->GetOutsMap(), op->Attrs());
  if (no_need_buffer_slots.empty()) return;

  for (auto& slot : no_need_buffer_slots) {
    auto iter = ins->find(slot);
    if (iter == ins->end()) continue;
    VLOG(2) << "Clear data buffer of " << slot << " in " << op->Type();

    PADDLE_ENFORCE_EQ(
        iter->second.IsGrad(), false,
        platform::errors::InvalidArgument(
            "Only forward variable buffers can be clear, this may be a bug"));

    for (auto& each_var : *(iter->second.MutableVarList())) {
      if (!each_var) continue;

      auto& var = each_var->Var();
      PADDLE_ENFORCE_EQ(var.IsType<framework::LoDTensor>(), true,
                        platform::errors::PermissionDenied(
                            "NoNeedBufferVars only support LoDTensor"));
      auto new_var = new VariableWrapper(each_var->Name());
      auto* new_tensor =
          new_var->MutableVar()->GetMutable<framework::LoDTensor>();
      auto& old_tensor = var.Get<framework::LoDTensor>();
      new_tensor->Resize(old_tensor.dims());
      new_tensor->set_lod(old_tensor.lod());
      each_var.reset(new_var);
    }
  }
}

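// CreateGradOpNode builds the backward node for `op` through the
// registered dygraph grad-op maker. It returns nullptr when the op has no
// grad maker or the maker yields an empty node (e.g. when no gradient is
// required); otherwise every grad op in the node gets a unique id, the
// execution place, and its no-need-buffer inputs cleared.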
std::shared_ptr<GradOpNode> CreateGradOpNode(
    const framework::OperatorBase& op, const NameVarBaseMap& ins,
    const NameVarBaseMap& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs, const platform::Place& place,
    const std::map<std::string, std::string>& inplace_map) {
  const auto& info = op.Info();
  if (!info.dygraph_grad_op_maker_) {
    return nullptr;
  }

  auto grad_node = info.dygraph_grad_op_maker_(op.Type(), ins, outs, attrs,
                                               default_attrs, inplace_map);
  if (grad_node && !grad_node->empty()) {
    for (auto& grad_op : *grad_node) {
      grad_op.SetId(OpBase::GenerateUniqueId());
      grad_op.SetPlace(place);
      ClearNoNeedBufferInputs(&grad_op);
    }
    return grad_node;
  } else {
    return nullptr;
  }
}

}  // namespace imperative
}  // namespace paddle