// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/tracer.h"

#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>

#include "paddle/fluid/framework/var_type_inference.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace imperative {

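// Creates the grad op descs for a forward op desc by invoking the op's
// registered GradOpMaker. Expects grad_op_descs to be empty; ops without a
// grad op maker are skipped.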
void CreateGradOp(const framework::OpDesc& op_desc,
                  const std::unordered_set<std::string>& no_grad_set,
                  const std::vector<framework::BlockDesc*>& grad_sub_block,
                  std::vector<framework::OpDesc*>* grad_op_descs,
                  std::unordered_map<std::string, std::string>* grad_to_var) {
  PADDLE_ENFORCE(grad_op_descs->empty());
  const framework::OpInfo& op_info =
      framework::OpInfoMap::Instance().Get(op_desc.Type());
  if (!op_info.grad_op_maker_) return;

  std::vector<std::unique_ptr<framework::OpDesc>> descs =
      op_info.GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
  for (auto& desc : descs) {
    grad_op_descs->emplace_back(desc.release());
  }
}

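// Lazily creates the gradient VarBase for `var` on the forward op's place and
// zero-initializes its tensor. Does nothing if the grad VarBase already
// exists.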
void InitGrad(VarBase* var, platform::DeviceContext* dev_ctx) {
  PADDLE_ENFORCE_NOT_NULL(var, "Could not get valid var base");
  PADDLE_ENFORCE_NOT_NULL(dev_ctx,
                          "Could not get valid device from forward op");

  if (var->grads_ == nullptr) {
    auto& var_t = var->var_->Get<framework::LoDTensor>();
    var->grads_ = new VarBase(var->GradName(), framework::proto::VarType::FP32,
                              framework::vectorize(var_t.dims()),
                              dev_ctx->GetPlace(), true, false);
    auto grad_t = var->grads_->var_->GetMutable<framework::LoDTensor>();
    operators::math::set_constant(*dev_ctx, grad_t, 0.0);
  }
}

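// Returns `place` after verifying that every input tensor already lives on
// that place; throws if any input sits on a different place.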
platform::Place GetExpectedPlace(platform::Place place, VarBasePtrMap inputs) {
  platform::Place result = place;
  for (auto it : inputs) {
    for (VarBase* var : it.second) {
      platform::Place tmp_place =
          var->var_->Get<framework::LoDTensor>().place();
      if (!platform::is_same_place(tmp_place, result)) {
        PADDLE_THROW(
            "Input variable should keep in the same place: %s, but get place: "
            "%s of input %s instead",
            result, tmp_place, it.first);
      }
    }
  }

  return result;
}

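// Builds the framework::VariableNameMap for the op's inputs from the traced
// VarBases. Input slots with no VarBases must be dispensable and map to empty
// name lists; ops without a registered proto yield an empty map.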
framework::VariableNameMap CreateInputVarNameMap(
    const OpBase* op, const VarBasePtrMap& varbase_map) {
  framework::VariableNameMap result;

  auto& info_map = framework::OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(op->Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) {
    return result;
  }

  for (auto& in : op_info->Proto().inputs()) {
    auto it = varbase_map.find(in.name());
    if (it == varbase_map.end()) {
      PADDLE_ENFORCE(in.dispensable());
      result[in.name()] = {};
    } else {
      auto var_vector = it->second;
      std::vector<std::string> args;
      args.reserve(var_vector.size());
      for (VarBase* var_base : var_vector) {
        args.emplace_back(var_base->Name());
      }
      result[in.name()] = args;
    }
  }
  return result;
}

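// The output-side counterpart of CreateInputVarNameMap.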
framework::VariableNameMap CreateOutputVarNameMap(
    const OpBase* op, const VarBasePtrMap& varbase_map) {
  framework::VariableNameMap result;

  auto& info_map = framework::OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(op->Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) {
    return result;
  }

  for (auto& out : op_info->Proto().outputs()) {
    auto it = varbase_map.find(out.name());
    if (it == varbase_map.end()) {
      PADDLE_ENFORCE(out.dispensable());
      result[out.name()] = {};
    } else {
      auto var_vector = it->second;
      std::vector<std::string> args;
      args.reserve(var_vector.size());
      for (VarBase* var_base : var_vector) {
        args.emplace_back(var_base->Name());
      }
      result[out.name()] = args;
    }
  }
  return result;
}

Tracer::Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {}

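// Runs a single op eagerly: collects the runtime inputs/outputs, executes the
// forward kernel on the expected place, and, unless stop_gradient is set,
// creates the grad op descs and wires up the grad input/output VarBases for
// the backward pass. Returns the set of variable slot names saved for
// backward.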
std::set<std::string> Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                                    VarBasePtrMap* outputs,
                                    framework::AttributeMap attrs_map,
                                    const platform::Place expected_place,
                                    const bool stop_gradient) {
  platform::RecordEvent record_event(op->type_);
  framework::VariableValueMap invars_map;
  framework::VariableValueMap outvars_map;

  // Construct input_vars_map and output_vars_map
  std::map<std::string, VarBase*> current_vars_map;
  op->input_vars_ = inputs;
  for (auto it : op->input_vars_) {
    auto& invars = invars_map[it.first];
    invars.reserve(it.second.size());
    for (VarBase* inp : it.second) {
      PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr", op->Type(),
                              inp->Name());

      invars.emplace_back(inp->var_.get());
      if (!stop_gradient) {
        current_vars_map[inp->Name()] = inp;
      }
      VLOG(3) << "input var name: " << inp->Name()
              << " inited: " << inp->var_->IsInitialized()
              << " stop_grad: " << inp->IsStopGradient();
    }
    op->TrackPreOp(it.first, it.second);
  }

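  // Collect the op's output variables and record this op as their producer.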
  op->output_vars_ = *outputs;
  for (auto it : op->output_vars_) {
    auto& outvars = outvars_map[it.first];
    const std::vector<VarBase*>& outputs = it.second;
    outvars.reserve(outputs.size());
    for (size_t i = 0U; i < outputs.size(); ++i) {
      VarBase* out = outputs[i];
      outvars.emplace_back(out->var_.get());
      out->TrackPreOp(op, it.first, i, stop_gradient);
      if (!stop_gradient) {
        current_vars_map[out->Name()] = out;
      }

      VLOG(3) << "output var name: " << out->Name()
              << " inited: " << out->var_->IsInitialized()
              << " stop_grad: " << out->IsStopGradient();
    }
  }

  // Check attrs and create op
  framework::VariableNameMap invars_name_map =
      CreateInputVarNameMap(op, inputs);
  framework::VariableNameMap outvars_name_map =
      CreateOutputVarNameMap(op, *outputs);

  auto& info = framework::OpInfoMap::Instance().Get(op->Type());
  if (info.Checker() != nullptr) {
    info.Checker()->Check(&attrs_map);
  }

  std::unique_ptr<framework::OperatorBase> op_base =
      framework::OpRegistry::CreateOp(op->Type(), invars_name_map,
                                      outvars_name_map, attrs_map);

  if (info.infer_var_type_) {
    RuntimeInferVarTypeContext infer_var_type_ctx(&inputs, outputs, &attrs_map);
    info.infer_var_type_(&infer_var_type_ctx);
  }

  // TODO(minqiyang): Support infer var type in imperative mode
  // Run forward op
  VLOG(3) << "tracer running " << op->Type();
  framework::RuntimeContext ctx(invars_map, outvars_map);

  // TODO(panyx0718): Cache p.
  framework::OperatorWithKernel* op_kernel =
      dynamic_cast<framework::OperatorWithKernel*>(op_base.get());
  PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");

  framework::Scope scope;
  op->place_ = GetExpectedPlace(expected_place, inputs);

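  // Prepare the kernel for the chosen place, run shape inference, then invoke
  // the kernel with an ExecutionContext built on a temporary scope.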
  PreparedOp prepared_op = PreparedOp::Prepare(ctx, *op_kernel, op->place_);
  prepared_op.op.RuntimeInferShape(scope, op->place_, ctx);
  prepared_op.func(
      framework::ExecutionContext(prepared_op.op, scope, *prepared_op.dev_ctx,
                                  prepared_op.ctx, prepared_op.kernel_configs));

  // construct backward op
  std::set<std::string> vars_saved_for_backward;
  if (!stop_gradient) {
    VLOG(5) << "start construct backward op";

    // construct grad op descs
    op->attrs_ = attrs_map;
    std::unique_ptr<framework::OpDesc> fwd_op_desc(new framework::OpDesc(
        op->Type(), invars_name_map, outvars_name_map, attrs_map));
    std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
        new std::unordered_map<std::string, std::string>());
    // NOTE(minqiyang): We don't support control flow op in imperative now
    // Add grad_block_ when we want to support it
    CreateGradOp(*fwd_op_desc, {}, {}, &op->grad_op_descs_, grad_to_var.get());

    VLOG(5) << "create grad op desc: " << op->grad_op_descs_[0]->Type();

    const size_t grad_op_count = op->grad_op_descs_.size();

    op->grad_input_vars_.resize(grad_op_count);
    op->grad_output_vars_.resize(grad_op_count);

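    // For every grad op desc, bind its input/output names to concrete
    // VarBases: forward vars resolve through current_vars_map, while grad
    // vars are created on demand via InitGrad.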
    for (size_t i = 0; i < grad_op_count; ++i) {
      framework::OpDesc* grad_op_desc = op->grad_op_descs_[i];
      for (auto it : grad_op_desc->Inputs()) {
        auto& grad_in_vars = op->grad_input_vars_[i][it.first];
        grad_in_vars.reserve(it.second.size());
        for (const std::string& grad_invar : it.second) {
          auto var_it = grad_to_var->find(grad_invar);
          if (var_it == grad_to_var->end()) {
            auto fwd_var_it = current_vars_map.find(grad_invar);
            PADDLE_ENFORCE(fwd_var_it != current_vars_map.end());
            // Forward inputs or outputs.
            grad_in_vars.emplace_back(fwd_var_it->second);
          } else {
            VarBase* var = current_vars_map[var_it->second];
            InitGrad(var, prepared_op.GetDeviceContext());
            // Douts.
            grad_in_vars.emplace_back(var->grads_);
          }

          vars_saved_for_backward.insert(it.first);
        }
      }

      for (auto it : grad_op_desc->Outputs()) {
        auto& grad_out_vars = op->grad_output_vars_[i][it.first];
        for (const std::string& grad_outvar : it.second) {
          auto var_it = grad_to_var->find(grad_outvar);
          PADDLE_ENFORCE(var_it != grad_to_var->end(),
                         "Could not find the grad op output var; should this "
                         "operator %s's stop gradient be True?",
                         op->Type());
          VarBase* var = current_vars_map[var_it->second];
          InitGrad(var, prepared_op.GetDeviceContext());
          grad_out_vars.push_back(var->grads_);
          VLOG(3) << "grads output var name: " << var->name_;
        }
      }
    }
  }

  return vars_saved_for_backward;
}

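// Traces a Python-defined layer (PyLayer): applies its forward function,
// wraps the returned Variables in new VarBases, and, unless stop_gradient is
// set, records the grad inputs/outputs needed to run the layer's backward
// (CPU only for now).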
std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
                                      const std::vector<VarBase*>& inputs,
                                      bool stop_gradient) {
  VLOG(3) << "py_trace " << op->Type();

  op->input_vars_[PyLayer::kFwdInp] = inputs;

  std::vector<std::unique_ptr<framework::Variable>> ret_vars =
      PyLayer::Apply(op->forward_id_, inputs);
  op->TrackPreOp(PyLayer::kFwdInp, inputs);

  std::vector<VarBase*>& outputs = op->output_vars_[PyLayer::kFwdOut];
  outputs.reserve(ret_vars.size());
  for (size_t i = 0U; i != ret_vars.size(); ++i) {
    VarBase* out = new VarBase(string::Sprintf("%s_out_%d", op->Type(), i),
                               std::move(ret_vars[i]), nullptr, stop_gradient);
    outputs.emplace_back(out);
    out->TrackPreOp(op, PyLayer::kFwdOut, i, stop_gradient);
  }

  if (!stop_gradient) {
    VLOG(5) << "start construct backward op";
    op->grad_input_vars_.resize(1);
    op->grad_output_vars_.resize(1);
    auto& grad_input_vars =
        op->grad_input_vars_[0][framework::GradVarName(PyLayer::kFwdInp)];
    auto& grad_output_vars =
        op->grad_output_vars_[0][framework::GradVarName(PyLayer::kFwdOut)];

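    // The PyLayer backward receives the forward inputs, the forward outputs,
    // and the output grads as its grad inputs, and writes the input grads as
    // its grad outputs.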
    for (VarBase* inp : inputs) {
      grad_input_vars.push_back(inp);
    }
    for (VarBase* out : outputs) {
      grad_input_vars.push_back(out);
    }

    // TODO(minqiyang): Add GPU support for PyLayer, only support CPU now
    platform::CPUPlace place;
    for (VarBase* out : outputs) {
      InitGrad(out, platform::DeviceContextPool::Instance().Get(place));
      grad_input_vars.push_back(out->grads_);
    }

    for (VarBase* inp : inputs) {
      InitGrad(inp, platform::DeviceContextPool::Instance().Get(place));
      grad_output_vars.push_back(inp->grads_);
    }
  }
  return outputs;
}

}  // namespace imperative
}  // namespace paddle