// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/imperative/engine.h"
#include "paddle/fluid/imperative/layer.h"

namespace paddle {
namespace imperative {

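// Creates the single gradient OpDesc for a forward op via its registered
// GradOpMaker. Ops that expand into more than one grad op are not supported.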
void CreateGradOp(const framework::OpDesc& op_desc,
                  const std::unordered_set<std::string>& no_grad_set,
                  const std::vector<framework::BlockDesc*>& grad_sub_block,
                  framework::OpDesc** grad_op_desc,
                  std::unordered_map<std::string, std::string>* grad_to_var) {
  std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
      framework::OpInfoMap::Instance()
          .Get(op_desc.Type())
          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
  PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now.");
  // TODO(panyx0718): Leak?
  *grad_op_desc = grad_op_descs[0].release();
}

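// Zero-initializes grad_var as a CPU float tensor with the same shape as var.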
void InitVar(framework::Variable* var, framework::Variable* grad_var) {
  auto& var_t = var->Get<framework::LoDTensor>();
  float* data =
      grad_var->GetMutable<framework::LoDTensor>()->mutable_data<float>(
          var_t.dims(), platform::CPUPlace());
  std::fill(data, data + var_t.numel(), 0.0);
}

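// Tracer executes imperative ops eagerly and records the information needed
// to build the corresponding backward graph.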
class Tracer {
 public:
  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {}

  virtual ~Tracer() {}

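  // Runs the forward op described by op->op_desc_ and, unless stop_gradient
  // is set, prepares its gradient op and gradient variables for backward.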
  void Trace(OpBase* op,
             const std::map<std::string, std::vector<VarBase*>>& inputs,
             const std::map<std::string, std::vector<VarBase*>>& outputs,
             framework::BlockDesc* block, const bool stop_gradient = false) {
    std::map<std::string, VarBase*> vars;

    framework::OpDesc* op_desc = op->op_desc_;
    VLOG(3) << "tracer tracing " << op_desc->Type();
    op_desc->InferShape(*block);
    op_desc->InferVarType(block);
    std::unique_ptr<framework::OperatorBase> op_base =
        framework::OpRegistry::CreateOp(*op_desc);

    framework::VariableValueMap invars_map;
    framework::VariableValueMap outvars_map;

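    // Gather input Variables and link this op to the ops that produced them.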
    op->input_vars_ = inputs;
    for (auto it : op->input_vars_) {
      auto& invars = invars_map[it.first];
      for (VarBase* inp : it.second) {
        PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr",
                                op->op_desc_->Type(), inp->var_desc_->Name());

        invars.push_back(inp->var_);
        vars[inp->var_desc_->Name()] = inp;
        if (inp->pre_op_) {
          op->pre_ops_[it.first].push_back(inp->pre_op_);
          op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
        } else {
          op->pre_ops_[it.first].push_back(nullptr);
        }
        VLOG(3) << "input vname " << inp->var_desc_->Name() << " "
                << inp->var_->IsInitialized();
      }
    }

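    // Gather output Variables, allocate LoDTensor outputs and mark this op
    // as their producer.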
    op->output_vars_ = outputs;
    for (auto it : op->output_vars_) {
      auto& outvars = outvars_map[it.first];
      const std::vector<VarBase*>& outputs = it.second;
      for (size_t i = 0; i < outputs.size(); ++i) {
        VarBase* out = outputs[i];
        outvars.push_back(out->var_);
        vars[out->var_desc_->Name()] = out;

        framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name());
        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
          out->var_->GetMutable<framework::LoDTensor>();
        } else {
          LOG(ERROR) << "tracer does not support non-LoDTensor outputs yet";
        }
        out->stop_gradient_ = stop_gradient;
        out->pre_op_ = op;
        out->pre_op_out_name_ = it.first;
        out->pre_op_out_idx_ = i;

        VLOG(3) << "output vname " << out->var_desc_->Name() << " "
                << out->var_->IsInitialized();
      }
    }

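    // Execute the forward kernel on CPU inside a temporary local scope.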
    VLOG(3) << "tracer running " << op_desc->Type();
    framework::RuntimeContext ctx(invars_map, outvars_map);

    // TODO(panyx0718): Cache p.
    framework::OperatorWithKernel* op_kernel =
        dynamic_cast<framework::OperatorWithKernel*>(op_base.get());
    PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");

    framework::Scope scope;
    platform::CPUPlace place;
    PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
    p.op.RuntimeInferShape(scope, place, ctx);
    p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));

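    // Build the gradient op and wire up its inputs/outputs for backward.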
    if (!stop_gradient) {
      framework::OpDesc* grad_op_desc;
      // TODO(panyx): Is this leaked?
      std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
          new std::unordered_map<std::string, std::string>());
      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get());
      op->grad_op_desc_ = grad_op_desc;

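      // A grad-op input is either a forward variable or the gradient of a
      // forward variable; gradient tensors are zero-initialized on demand.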
      for (auto it : grad_op_desc->Inputs()) {
        auto& grad_in_vars = op->grad_input_vars_[it.first];
        for (const std::string& grad_invar : it.second) {
          block->FindRecursiveOrCreateVar(grad_invar);
          auto var_it = grad_to_var->find(grad_invar);
          if (var_it == grad_to_var->end()) {
            auto fwd_var_it = vars.find(grad_invar);
            PADDLE_ENFORCE(fwd_var_it != vars.end());
            // Forward inputs or outputs.
            grad_in_vars.push_back(fwd_var_it->second->var_);
          } else {
            VarBase* var = vars[var_it->second];
            if (!var->grads_->IsInitialized()) {
              InitVar(var->var_, var->grads_);
            }
            // Douts: gradients of forward outputs.
            grad_in_vars.push_back(var->grads_);
          }
        }
      }

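      // Every grad-op output is the gradient of some forward variable.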
      for (auto it : grad_op_desc->Outputs()) {
        auto& grad_out_vars = op->grad_output_vars_[it.first];
        for (const std::string& grad_outvar : it.second) {
          block->FindRecursiveOrCreateVar(grad_outvar);
          auto var_it = grad_to_var->find(grad_outvar);
          PADDLE_ENFORCE(var_it != grad_to_var->end());
          VarBase* var = vars[var_it->second];
          if (!var->grads_->IsInitialized()) {
            InitVar(var->var_, var->grads_);
          }
          grad_out_vars.push_back(var->grads_);
        }
      }
    }

    op->block_ = block;
  }

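  // Traces a Python-defined layer (PyLayer): runs its forward function on the
  // inputs and records the variables needed to run its backward later.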
  std::vector<VarBase*> PyTrace(OpBase* op, const std::vector<VarBase*>& inputs,
                                bool stop_gradient = false) {
    VLOG(3) << "py_trace";
    op->input_vars_["X"] = inputs;
    op->output_vars_["Out"] = PyLayer::Apply(op->forward_id_, inputs);
    for (VarBase* inp : inputs) {
      if (inp->pre_op_) {
        op->pre_ops_["X"].push_back(inp->pre_op_);
        op->pre_ops_out_idx_["X"].push_back(inp->pre_op_out_idx_);
      } else {
        op->pre_ops_["X"].push_back(nullptr);
      }
    }

    auto& outputs = op->output_vars_["Out"];
    for (size_t i = 0; i < outputs.size(); ++i) {
      VarBase* out = outputs[i];
      out->stop_gradient_ = stop_gradient;
      out->pre_op_ = op;
      out->pre_op_out_name_ = "Out";
      out->pre_op_out_idx_ = i;
    }
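    // PyLayer backward consumes the forward inputs, outputs and output
    // gradients, and produces the gradients of the inputs.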
    if (!stop_gradient) {
      auto& grad_input_vars = op->grad_input_vars_["X@GRAD"];
      auto& grad_output_vars = op->grad_output_vars_["Out@GRAD"];

      for (const VarBase* inp : inputs) {
        grad_input_vars.push_back(inp->var_);
      }
      for (VarBase* out : outputs) {
        grad_input_vars.push_back(out->var_);
      }
      for (VarBase* out : outputs) {
        grad_input_vars.push_back(out->grads_);
        if (!grad_input_vars.back()->IsInitialized()) {
          InitVar(out->var_, grad_input_vars.back());
        }
      }
      for (const VarBase* inp : inputs) {
        grad_output_vars.push_back(inp->grads_);
        if (!grad_output_vars.back()->IsInitialized()) {
          InitVar(inp->var_, grad_output_vars.back());
        }
      }
    }
    return outputs;
  }

 private:
  framework::BlockDesc* root_block_;
};

}  // namespace imperative
}  // namespace paddle