// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/tracer.h"

#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace imperative {

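// Uses the registered GradOpMaker of op_desc's type to generate the gradient
// op descriptions; grad_to_var maps each gradient variable name back to its
// corresponding forward variable.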
void CreateGradOp(const framework::OpDesc& op_desc,
                  const std::unordered_set<std::string>& no_grad_set,
                  const std::vector<framework::BlockDesc*>& grad_sub_block,
                  std::vector<framework::OpDesc*>* grad_op_descs,
                  std::unordered_map<std::string, std::string>* grad_to_var) {
  PADDLE_ENFORCE(grad_op_descs->empty());
  std::vector<std::unique_ptr<framework::OpDesc>> descs =
      framework::OpInfoMap::Instance()
          .Get(op_desc.Type())
          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);

  for (auto& desc : descs) {
    grad_op_descs->emplace_back(desc.release());
  }
}

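// Zero-initializes grad_var as a float LoDTensor with the same shape as var,
// allocated on the device owned by dev_ctx.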
void InitVar(framework::Variable* var, framework::Variable* grad_var,
             platform::DeviceContext* dev_ctx) {
  PADDLE_ENFORCE_NOT_NULL(
      dev_ctx, "Could not get a valid device context from the forward op");
  auto& var_t = var->Get<framework::LoDTensor>();
  grad_var->GetMutable<framework::LoDTensor>()->mutable_data<float>(
      var_t.dims(), dev_ctx->GetPlace());
  operators::math::set_constant(
      *dev_ctx, grad_var->GetMutable<framework::LoDTensor>(), 0.0);
}

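// Returns `place` after checking that every input tensor already resides
// there; throws if any input is on a different place.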
platform::Place GetExpectedPlace(platform::Place place, VarBasePtrMap inputs) {
  platform::Place result = place;
  for (auto it : inputs) {
    for (VarBase* var : it.second) {
      platform::Place tmp_place =
          var->var_->Get<framework::LoDTensor>().place();
      if (!platform::is_same_place(tmp_place, result)) {
        PADDLE_THROW(
            "Input variables should all be on the same place %s, but got "
            "place %s for input %s",
            result, tmp_place, it.first);
      }
    }
  }

  return result;
}

// framework::BlockDesc* InferShapeAndVarType(OpBase* op, const VarBasePtrMap&
// inputs, const VarBasePtrMap& outputs) {
// std::unique_ptr<BlockDesc> block(new BlockDesc());

// // construct op desc
// op->op_desc_ = block.AppendOp();

// // construct op inputs and outputs
// // for
// //
// for (auto it = )
// op->op_desc_->SetInput()

// op->op_desc_->InferShape(*block);
// op->op_desc_->InferVarType(block.get());

// return block.release();
// }

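// Traces a single forward op in imperative mode: runs shape and var-type
// inference on its OpDesc, executes the kernel on the expected place, and,
// unless stop_gradient is set, builds the gradient op descs and binds their
// input/output variables for the later backward pass.
//
// A minimal usage sketch (the surrounding setup is illustrative, not part of
// this file):
//   imperative::Tracer tracer(root_block);
//   tracer.Trace(op, inputs, outputs, block, platform::CPUPlace(),
//                /*stop_gradient=*/false);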
void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                   const VarBasePtrMap& outputs, framework::BlockDesc* block,
                   const platform::Place expected_place,
                   const bool stop_gradient) {
  std::map<std::string, VarBase*> vars;

  // framework::BlockDesc* block = InferShapeAndVarType(op, inputs, outputs);

  framework::OpDesc* op_desc = op->op_desc_;
  VLOG(3) << "tracer tracing " << op_desc->Type();
  op_desc->InferShape(*block);
  op_desc->InferVarType(block);

  std::unique_ptr<framework::OperatorBase> op_base =
      framework::OpRegistry::CreateOp(*op_desc);

  framework::VariableValueMap invars_map;
  framework::VariableValueMap outvars_map;

  op->input_vars_ = inputs;
  for (auto it : op->input_vars_) {
    auto& invars = invars_map[it.first];
    invars.reserve(it.second.size());
    for (VarBase* inp : it.second) {
      PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr",
                              op->op_desc_->Type(), inp->var_desc_->Name());

      invars.emplace_back(inp->var_);
      vars[inp->var_desc_->Name()] = inp;
      if (inp->PreOp() && !inp->IsStopGradient()) {
        op->pre_ops_[it.first].push_back(inp->PreOp());
        op->pre_ops_out_idx_[it.first].push_back(inp->PreOpOutIdx());
      } else {
        op->pre_ops_[it.first].push_back(nullptr);
      }
      VLOG(3) << "input vname " << inp->var_desc_->Name() << " "
              << inp->var_->IsInitialized();
    }
  }

  op->output_vars_ = outputs;
  for (auto it : op->output_vars_) {
    auto& outvars = outvars_map[it.first];
    const std::vector<VarBase*>& outputs = it.second;
    outvars.reserve(outputs.size());
    for (size_t i = 0; i < outputs.size(); ++i) {
      VarBase* out = outputs[i];
      outvars.emplace_back(out->var_);
      vars[out->var_desc_->Name()] = out;

      framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name());
      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
        out->var_->GetMutable<framework::LoDTensor>();
      } else {
        LOG(ERROR) << "tracer doesn't support this var type yet";
      }
      out->TrackPreOp(op, it.first, i, stop_gradient);

      VLOG(3) << "output vname " << out->var_desc_->Name() << " "
              << out->var_->IsInitialized();
    }
  }

  VLOG(3) << "tracer running " << op_desc->Type();
  framework::RuntimeContext ctx(invars_map, outvars_map);

  // TODO(panyx0718): Cache p.
  framework::OperatorWithKernel* op_kernel =
      dynamic_cast<framework::OperatorWithKernel*>(op_base.get());
  PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");

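  // Run the kernel via PreparedOp on the place inferred from the inputs; the
  // temporary scope carries no variables, since inputs and outputs are passed
  // through the RuntimeContext.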
  framework::Scope scope;
  op->place_ = GetExpectedPlace(expected_place, inputs);
  PreparedOp prepared_op = PreparedOp::Prepare(ctx, *op_kernel, op->place_);
  prepared_op.op.RuntimeInferShape(scope, op->place_, ctx);
  prepared_op.func(framework::ExecutionContext(
      prepared_op.op, scope, *prepared_op.dev_ctx, prepared_op.ctx));

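  // Eagerly construct the backward ops and bind their gradient variables so
  // that a later backward() call can execute them directly.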
  if (!stop_gradient) {
    std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
        new std::unordered_map<std::string, std::string>());
    CreateGradOp(*op_desc, {}, {block}, &op->grad_op_descs_, grad_to_var.get());

    op->grad_input_vars_.resize(op->grad_op_descs_.size());
    op->grad_output_vars_.resize(op->grad_op_descs_.size());
    for (size_t i = 0; i < op->grad_op_descs_.size(); ++i) {
      framework::OpDesc* grad_op_desc = op->grad_op_descs_[i];
      for (auto it : grad_op_desc->Inputs()) {
        auto& grad_in_vars = op->grad_input_vars_[i][it.first];
        for (const std::string& grad_invar : it.second) {
          block->FindRecursiveOrCreateVar(grad_invar);
          auto var_it = grad_to_var->find(grad_invar);
          if (var_it == grad_to_var->end()) {
            auto fwd_var_it = vars.find(grad_invar);
            PADDLE_ENFORCE(fwd_var_it != vars.end());
            // Forward inputs or outputs.
            grad_in_vars.push_back(fwd_var_it->second->var_);
          } else {
            VarBase* var = vars[var_it->second];
            if (!var->grads_->var_->IsInitialized()) {
              InitVar(var->var_, var->grads_->var_,
                      prepared_op.GetDeviceContext());
            }
            // Douts.
            grad_in_vars.push_back(var->grads_->var_);
          }
        }
      }

      for (auto it : grad_op_desc->Outputs()) {
        auto& grad_out_vars = op->grad_output_vars_[i][it.first];
        for (const std::string& grad_outvar : it.second) {
          block->FindRecursiveOrCreateVar(grad_outvar);
          auto var_it = grad_to_var->find(grad_outvar);
          PADDLE_ENFORCE(var_it != grad_to_var->end(),
                         "Could not find the grad op output var; should this "
                         "operator %s's stop_gradient be True?",
                         op_desc->Type());
          VarBase* var = vars[var_it->second];
          if (!var->grads_->var_->IsInitialized()) {
            InitVar(var->var_, var->grads_->var_,
                    prepared_op.GetDeviceContext());
          }
          grad_out_vars.push_back(var->grads_->var_);
        }
      }
    }
  }

  op->block_ = block;
}

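// Traces a Python-defined PyLayer: applies the registered forward function,
// records pre-op dependencies, and, unless stop_gradient is set, wires the
// grad-op inputs (forward inputs, forward outputs, and output grads) and
// grad-op outputs (input grads). The gradient buffers are CPU-only for now.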
std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
                                      const std::vector<VarBase*>& inputs,
                                      bool stop_gradient) {
  VLOG(3) << "py_trace";
  op->input_vars_[PyLayer::kFwdInp] = inputs;
  op->output_vars_[PyLayer::kFwdOut] = PyLayer::Apply(op->forward_id_, inputs);
  for (VarBase* inp : inputs) {
    if (inp->PreOp() && !inp->IsStopGradient()) {
      op->pre_ops_[PyLayer::kFwdInp].push_back(inp->PreOp());
      op->pre_ops_out_idx_[PyLayer::kFwdInp].push_back(inp->PreOpOutIdx());
    } else {
      op->pre_ops_[PyLayer::kFwdInp].push_back(nullptr);
    }
  }

  auto& outputs = op->output_vars_[PyLayer::kFwdOut];
  for (size_t i = 0; i < outputs.size(); ++i) {
    VarBase* out = outputs[i];
    out->TrackPreOp(op, PyLayer::kFwdOut, i, stop_gradient);
  }
  if (!stop_gradient) {
    op->grad_input_vars_.resize(1);
    op->grad_output_vars_.resize(1);
    auto& grad_input_vars =
        op->grad_input_vars_[0][framework::GradVarName(PyLayer::kFwdInp)];
    auto& grad_output_vars =
        op->grad_output_vars_[0][framework::GradVarName(PyLayer::kFwdOut)];

    for (const VarBase* inp : inputs) {
      grad_input_vars.push_back(inp->var_);
    }
    for (VarBase* out : outputs) {
      grad_input_vars.push_back(out->var_);
    }

    platform::CPUPlace place;
    for (VarBase* out : outputs) {
      grad_input_vars.push_back(out->grads_->var_);
      if (!grad_input_vars.back()->IsInitialized()) {
        // TODO(minqiyang): Add GPU support for PyLayer, only support CPU now
        InitVar(out->var_, grad_input_vars.back(),
                platform::DeviceContextPool::Instance().Get(place));
      }
    }

    for (const VarBase* inp : inputs) {
      grad_output_vars.push_back(inp->grads_->var_);
      if (!grad_output_vars.back()->IsInitialized()) {
        // TODO(minqiyang): Add GPU support for PyLayer, only support CPU now
        InitVar(inp->var_, grad_output_vars.back(),
                platform::DeviceContextPool::Instance().Get(place));
      }
    }
  }
  return outputs;
}

}  // namespace imperative
}  // namespace paddle