// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/tracer.h"

#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace imperative {

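// Creates the gradient OpDesc for `op_desc` via the registered GradOpMaker;
// only forward ops that produce exactly one grad op are supported.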
void CreateGradOp(const framework::OpDesc& op_desc,
                  const std::unordered_set<std::string>& no_grad_set,
                  const std::vector<framework::BlockDesc*>& grad_sub_block,
                  framework::OpDesc** grad_op_desc,
                  std::unordered_map<std::string, std::string>* grad_to_var) {
  std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
      framework::OpInfoMap::Instance()
          .Get(op_desc.Type())
          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
  PADDLE_ENFORCE(grad_op_descs.size() == 1,
                 "Only one grad op is supported now.");
  // TODO(panyx0718): Leak?
  *grad_op_desc = grad_op_descs[0].release();
}

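// Allocates `grad_var` as a LoDTensor with the same shape as the forward
// variable `var` on the given device, and fills it with zeros.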
void InitVar(framework::Variable* var, framework::Variable* grad_var,
             platform::DeviceContext* dev_ctx) {
  PADDLE_ENFORCE_NOT_NULL(dev_ctx,
                          "Could not get valid device from forward op");
  auto& var_t = var->Get<framework::LoDTensor>();
  grad_var->GetMutable<framework::LoDTensor>()->mutable_data<float>(
      var_t.dims(), dev_ctx->GetPlace());
  operators::math::set_constant(
      *dev_ctx, grad_var->GetMutable<framework::LoDTensor>(), 0.0);
}

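// Verifies that every input variable already resides in `place` and returns
// that place; throws if any input lives somewhere else.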
platform::Place GetExpectedPlace(platform::Place place, VarBasePtrMap inputs) {
  platform::Place result = place;
  for (const auto& it : inputs) {
    for (VarBase* var : it.second) {
      platform::Place tmp_place =
          var->var_->Get<framework::LoDTensor>().place();
      if (!platform::is_same_place(tmp_place, result)) {
        PADDLE_THROW(
            "Input variables should all be in the same place: expected %s, "
            "but got place %s for input %s instead",
            result, tmp_place, it.first);
      }
    }
  }

  return result;
}

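// Traces a single forward op: runs shape/type inference, executes the op's
// kernel on the expected place, and (unless stop_gradient is set) builds the
// corresponding grad op together with its gradient variable bookkeeping.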
void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                   const VarBasePtrMap& outputs, framework::BlockDesc* block,
                   const platform::Place expected_place,
                   const bool stop_gradient) {
  std::map<std::string, VarBase*> vars;

  framework::OpDesc* op_desc = op->op_desc_;
  VLOG(3) << "tracer tracing " << op_desc->Type();
  op_desc->InferShape(*block);
  op_desc->InferVarType(block);
  std::unique_ptr<framework::OperatorBase> op_base =
      framework::OpRegistry::CreateOp(*op_desc);

  framework::VariableValueMap invars_map;
  framework::VariableValueMap outvars_map;

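  // Record forward inputs and link each input to its producing op (if any)
  // so the autograd graph can be traversed backwards later.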
  op->input_vars_ = inputs;
  for (auto it : op->input_vars_) {
    auto& invars = invars_map[it.first];
    for (VarBase* inp : it.second) {
      PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr",
                              op->op_desc_->Type(), inp->var_desc_->Name());

      invars.push_back(inp->var_);
      vars[inp->var_desc_->Name()] = inp;
      if (inp->pre_op_) {
        op->pre_ops_[it.first].push_back(inp->pre_op_);
        op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
      } else {
        op->pre_ops_[it.first].push_back(nullptr);
      }
      VLOG(3) << "input vname " << inp->var_desc_->Name() << " "
              << inp->var_->IsInitialized();
    }
  }

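  // Record forward outputs and mark this op as their producer; only
  // LoDTensor outputs are supported by the tracer for now.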
  op->output_vars_ = outputs;
  for (auto it : op->output_vars_) {
    auto& outvars = outvars_map[it.first];
    const std::vector<VarBase*>& outputs = it.second;
    for (size_t i = 0; i < outputs.size(); ++i) {
      VarBase* out = outputs[i];
      outvars.push_back(out->var_);
      vars[out->var_desc_->Name()] = out;

      framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name());
      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
        out->var_->GetMutable<framework::LoDTensor>();
      } else {
        LOG(ERROR) << "tracer doesn't support yet";
      }
      out->stop_gradient_ = stop_gradient;
      out->pre_op_ = op;
      out->pre_op_out_name_ = it.first;
      out->pre_op_out_idx_ = i;

      VLOG(3) << "output vname " << out->var_desc_->Name() << " "
              << out->var_->IsInitialized();
    }
  }

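  // Prepare and run the kernel. The freshly created local scope stays empty:
  // variables are passed through the RuntimeContext instead.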
  VLOG(3) << "tracer running " << op_desc->Type();
  framework::RuntimeContext ctx(invars_map, outvars_map);

  // TODO(panyx0718): Cache p.
  framework::OperatorWithKernel* op_kernel =
      dynamic_cast<framework::OperatorWithKernel*>(op_base.get());
  PADDLE_ENFORCE_NOT_NULL(op_kernel, "only ops with kernels are supported");

  framework::Scope scope;
  op->expected_place_ = GetExpectedPlace(expected_place, inputs);
  PreparedOp prepared_op =
      PreparedOp::Prepare(ctx, *op_kernel, op->expected_place_);
  prepared_op.op.RuntimeInferShape(scope, op->expected_place_, ctx);
  prepared_op.func(framework::ExecutionContext(
      prepared_op.op, scope, *prepared_op.dev_ctx, prepared_op.ctx));

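  // Unless gradients are disabled, create the corresponding grad op and set
  // up its gradient variable bookkeeping.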
  if (!stop_gradient) {
    framework::OpDesc* grad_op_desc;
    // TODO(panyx): Is this leaked?
    std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
        new std::unordered_map<std::string, std::string>());
    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get());
    op->grad_op_desc_ = grad_op_desc;

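    // Each grad op input is either a forward input/output (reused as-is) or
    // an output gradient (dout), which is lazily zero-initialized here.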
    for (auto it : grad_op_desc->Inputs()) {
      auto& grad_in_vars = op->grad_input_vars_[it.first];
      for (const std::string& grad_invar : it.second) {
        block->FindRecursiveOrCreateVar(grad_invar);
        auto var_it = grad_to_var->find(grad_invar);
        if (var_it == grad_to_var->end()) {
          auto fwd_var_it = vars.find(grad_invar);
          PADDLE_ENFORCE(fwd_var_it != vars.end());
          // Forward inputs or outputs.
          grad_in_vars.push_back(fwd_var_it->second->var_);
        } else {
          VarBase* var = vars[var_it->second];
          if (!var->grads_->var_->IsInitialized()) {
            LOG(ERROR) << "Init grad input " << it.first << " " << grad_invar;
            InitVar(var->var_, var->grads_->var_,
                    prepared_op.GetDeviceContext());
          }
          // Douts.
          grad_in_vars.push_back(var->grads_->var_);
        }
      }
    }

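    // Each grad op output is an input gradient; zero-initialize it on first
    // use.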
    for (auto it : grad_op_desc->Outputs()) {
      auto& grad_out_vars = op->grad_output_vars_[it.first];
      for (const std::string& grad_outvar : it.second) {
        block->FindRecursiveOrCreateVar(grad_outvar);
        auto var_it = grad_to_var->find(grad_outvar);
        PADDLE_ENFORCE(var_it != grad_to_var->end(),
                       "Could not find the grad op output var %s; should "
                       "operator %s's stop_gradient be set to True?",
                       grad_outvar, op_desc->Type());
        VarBase* var = vars[var_it->second];
        if (!var->grads_->var_->IsInitialized()) {
          InitVar(var->var_, var->grads_->var_, prepared_op.GetDeviceContext());
          LOG(ERROR) << "Init grad output " << it.first << " " << grad_outvar
                     << var->grads_->var_->GetMutable<framework::LoDTensor>()
                            ->mutable_data(platform::CPUPlace());
        }
        grad_out_vars.push_back(var->grads_->var_);
      }
    }
  }

  op->block_ = block;
}

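// Traces an op implemented as a Python PyLayer: applies the registered
// forward function and records the same autograd bookkeeping as Trace.
// Per the TODOs below, PyLayer gradients currently live on CPU only.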
std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
                                      const std::vector<VarBase*>& inputs,
                                      bool stop_gradient) {
  VLOG(3) << "py_trace";
  op->input_vars_["X"] = inputs;
  op->output_vars_["Out"] = PyLayer::Apply(op->forward_id_, inputs);
  for (VarBase* inp : inputs) {
    if (inp->pre_op_) {
      op->pre_ops_["X"].push_back(inp->pre_op_);
      op->pre_ops_out_idx_["X"].push_back(inp->pre_op_out_idx_);
    } else {
      op->pre_ops_["X"].push_back(nullptr);
    }
  }

  auto& outputs = op->output_vars_["Out"];
  for (size_t i = 0; i < outputs.size(); ++i) {
    VarBase* out = outputs[i];
    out->stop_gradient_ = stop_gradient;
    out->pre_op_ = op;
    out->pre_op_out_name_ = "Out";
    out->pre_op_out_idx_ = i;
  }
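  // For PyLayer ops, the grad "inputs" are the forward Xs and Outs plus the
  // output gradients (douts); the grad "outputs" are the input gradients.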
  if (!stop_gradient) {
    auto& grad_input_vars = op->grad_input_vars_["X@GRAD"];
    auto& grad_output_vars = op->grad_output_vars_["Out@GRAD"];

    for (const VarBase* inp : inputs) {
      grad_input_vars.push_back(inp->var_);
    }
    for (VarBase* out : outputs) {
      grad_input_vars.push_back(out->var_);
    }

    platform::CPUPlace place;
    for (VarBase* out : outputs) {
      grad_input_vars.push_back(out->grads_->var_);
      if (!grad_input_vars.back()->IsInitialized()) {
        // TODO(minqiyang): Add GPU support for PyLayer, only support CPU now
        InitVar(out->var_, grad_input_vars.back(),
                platform::DeviceContextPool::Instance().Get(place));
      }
    }

    for (const VarBase* inp : inputs) {
      grad_output_vars.push_back(inp->grads_->var_);
      if (!grad_output_vars.back()->IsInitialized()) {
        // TODO(minqiyang): Add GPU support for PyLayer, only support CPU now
        InitVar(inp->var_, grad_output_vars.back(),
                platform::DeviceContextPool::Instance().Get(place));
      }
    }
  }
  return outputs;
}

}  // namespace imperative
}  // namespace paddle