Commit 61491ce2, authored by Xin Pan

clean

test=develop
Parent: ce7e503c
paddle/fluid/framework/operator.cc

@@ -182,7 +182,7 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
 
 void OperatorBase::Run(const RuntimeContext& ctx,
                        const platform::Place& place) {
-  RunImpl(ctx, place);
+  RunImplPrepared(ctx, place);
 }
 
 bool OperatorBase::HasInputs(const std::string& name) const {
@@ -959,9 +959,9 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   }
 }
 
-void OperatorWithKernel::RunImpl(const RuntimeContext& ctx,
-                                 const platform::Place& place) const {
-  Scope scope;
+void OperatorWithKernel::RunImplPrepared(const RuntimeContext& ctx,
+                                         const platform::Place& place) const {
+  Scope dummy_scope;
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
   auto* dev_ctx = pool.Get(place);
 
@@ -976,7 +976,7 @@ void OperatorWithKernel::RunImpl(const RuntimeContext& ctx,
   OpKernelMap& kernels = kernels_iter->second;
 
   auto expected_kernel_key = this->GetExpectedKernelType(
-      ExecutionContext(*this, scope, *dev_ctx, ctx));
+      ExecutionContext(*this, dummy_scope, *dev_ctx, ctx));
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
 
   auto kernel_iter = kernels.find(expected_kernel_key);
@@ -999,9 +999,9 @@ void OperatorWithKernel::RunImpl(const RuntimeContext& ctx,
     dev_ctx = pool.Get(expected_kernel_key.place_);
   }
 
-  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
+  RuntimeInferShapeContext infer_shape_ctx(*this, dummy_scope, ctx);
   this->InferShape(&infer_shape_ctx);
-  kernel_iter->second(ExecutionContext(*this, scope, *dev_ctx, ctx));
+  kernel_iter->second(ExecutionContext(*this, dummy_scope, *dev_ctx, ctx));
 }
 
 void OperatorWithKernel::TransferInplaceVarsBack(
......
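The renamed RunImplPrepared keeps the usual kernel-dispatch shape: build an ExecutionContext against a throwaway scope, compute the expected kernel key, look the kernel up, run InferShape, then invoke the kernel functor. A rough, self-contained sketch of that lookup-and-dispatch flow follows; all names and types here are simplified stand-ins (a string key in place of OpKernelType), not Paddle's real ones.

// Simplified sketch of the dispatch flow inside RunImplPrepared (stand-in types).
#include <functional>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

struct ExecContext {
  std::string place;  // stands in for ExecutionContext / device context
};
using OpKernelFn = std::function<void(const ExecContext&)>;
using OpKernelMap = std::map<std::string, OpKernelFn>;  // key ~ OpKernelType

void RunPrepared(const OpKernelMap& kernels, const std::string& place) {
  // "Expected kernel key": here just the place; Paddle also folds in data
  // type and layout via GetExpectedKernelType().
  auto it = kernels.find(place);
  if (it == kernels.end()) {
    throw std::runtime_error("no kernel registered for place " + place);
  }
  // InferShape() would run here, against the runtime context.
  it->second(ExecContext{place});
}

int main() {
  OpKernelMap kernels;
  kernels["CPUPlace"] = [](const ExecContext& ctx) {
    std::cout << "CPU kernel running on " << ctx.place << "\n";
  };
  RunPrepared(kernels, "CPUPlace");
  return 0;
}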
paddle/fluid/framework/operator.h

@@ -173,8 +173,10 @@ class OperatorBase {
 
   virtual void RunImpl(const Scope& scope,
                        const platform::Place& place) const = 0;
-  virtual void RunImpl(const RuntimeContext& ctx,
-                       const platform::Place& place) const {}
+  virtual void RunImplPrepared(const RuntimeContext& ctx,
+                               const platform::Place& place) const {
+    PADDLE_THROW("%s doesn't support RunPreparedImpl", Type());
+  }
 };
 
 class ExecutionContext {
@@ -466,8 +468,8 @@ class OperatorWithKernel : public OperatorBase {
   // same.
   proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
 
   void RunImpl(const Scope& scope, const platform::Place& place) const final;
-  void RunImpl(const RuntimeContext& ctx,
-               const platform::Place& place) const final;
+  void RunImplPrepared(const RuntimeContext& ctx,
+                       const platform::Place& place) const final;
 
   /**
    * Transfer data from scope to a transfered scope. If there is no data need to
......
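The header change is the interesting part: the RuntimeContext overload is renamed to RunImplPrepared and its default body now throws instead of silently doing nothing, so only operators that explicitly override it (OperatorWithKernel here, FillConstantOp below) can be run from a prepared RuntimeContext. Below is a minimal standalone sketch of that base/override pattern; class names are illustrative and a plain std::runtime_error stands in for PADDLE_THROW.

// Minimal sketch: a virtual "prepared run" hook whose base default fails loudly.
#include <iostream>
#include <stdexcept>
#include <string>

struct RuntimeContext {};  // stand-in for framework::RuntimeContext
struct Place {};           // stand-in for platform::Place

class OperatorBaseLike {
 public:
  explicit OperatorBaseLike(std::string type) : type_(std::move(type)) {}
  virtual ~OperatorBaseLike() = default;
  const std::string& Type() const { return type_; }

  // Default: unsupported; throw instead of silently doing nothing.
  virtual void RunImplPrepared(const RuntimeContext&, const Place&) const {
    throw std::runtime_error(Type() + " doesn't support RunImplPrepared");
  }

 private:
  std::string type_;
};

// An operator that opts in, the way OperatorWithKernel / FillConstantOp do.
class PreparedOp : public OperatorBaseLike {
 public:
  PreparedOp() : OperatorBaseLike("fill_constant") {}
  void RunImplPrepared(const RuntimeContext&, const Place&) const override {
    std::cout << "running prepared " << Type() << "\n";
  }
};

int main() {
  RuntimeContext ctx;
  Place place;

  PreparedOp ok;
  ok.RunImplPrepared(ctx, place);  // runs the override

  OperatorBaseLike legacy("some_legacy_op");
  try {
    legacy.RunImplPrepared(ctx, place);  // hits the throwing default
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << "\n";
  }
  return 0;
}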
paddle/fluid/imperative/layer.cc

@@ -31,11 +31,6 @@ using framework::Variable;
 
 void AddTo(Variable* src, Variable* dst) {
   framework::LoDTensor* dst_tensor = dst->GetMutable<framework::LoDTensor>();
   framework::LoDTensor* src_tensor = src->GetMutable<framework::LoDTensor>();
-  VLOG(3) << "apply var grad " << src_tensor->data<float>()[0] << " "
-          << src_tensor->data<float>()[1] << " "
-          << src_tensor->data<float>()[2];
   PADDLE_ENFORCE(dst_tensor->numel() == src_tensor->numel(), "%lld vs %lld",
                  dst_tensor->numel(), src_tensor->numel());
   float* dst_data = dst_tensor->mutable_data<float>(platform::CPUPlace());
@@ -43,10 +38,6 @@ void AddTo(Variable* src, Variable* dst) {
   for (size_t i = 0; i < src_tensor->numel(); ++i) {
     dst_data[i] += src_data[i];
   }
-  VLOG(3) << "apply var dst grad " << dst_tensor->data<float>()[0] << " "
-          << dst_tensor->data<float>()[1] << " "
-          << dst_tensor->data<float>()[2];
 }
 
 class Autograd {
@@ -55,16 +46,10 @@ class Autograd {
 
   void RunBackward(VarBase* var) {
     PADDLE_ENFORCE(var->pre_op_->op_desc_);
-    // TODO(panyx0718): Only create for vars that "require_grad"
-    LOG(ERROR) << reinterpret_cast<void*>(var->grads_) << " vs "
-               << reinterpret_cast<void*>(
-                      var->pre_op_
-                          ->output_vars_[var->pre_op_out_name_]
-                                        [var->pre_op_out_idx_]
-                          ->grads_);
-    var->pre_op_->output_vars_[var->pre_op_out_name_][var->pre_op_out_idx_]
-        ->grads_->GetMutable<framework::LoDTensor>()
-        ->ShareDataWith(var->grads_->Get<framework::LoDTensor>());
+    PADDLE_ENFORCE(
+        var->grads_ ==
+        var->pre_op_->output_vars_[var->pre_op_out_name_][var->pre_op_out_idx_]
+            ->grads_);
 
     std::deque<OpBase*> ready;
     ready.push_back(var->pre_op_);
@@ -76,7 +61,6 @@ class Autograd {
       ready.pop_front();
       std::map<std::string, std::vector<VarBase*>> input_grads =
          ready_op->ApplyGrad();
-      VLOG(3) << "after apply grad";
 
       for (auto it : input_grads) {
        const std::vector<VarBase*>& ingrads = it.second;
@@ -160,17 +144,12 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     for (size_t i = 0; i < it.second.size(); ++i) {
       outputs.push_back(new framework::Variable());
       outputs.back()->GetMutable<framework::LoDTensor>();
-      /*
-      auto& accum_grad_t = it.second[i]->Get<framework::LoDTensor>();
-      Variable* grad_var = outputs.back();
-      float* data = grad_var->GetMutable<framework::LoDTensor>()
-          ->mutable_data<float>(accum_grad_t.dims(), platform::CPUPlace());
-      std::fill(data, data + accum_grad_t.numel(), 0.0);*/
     }
   }
 
   framework::RuntimeContext ctx(grad_input_vars_, grad_outputs);
 
+  // No need to do static infer shape here.
   // grad_op_desc_->InferShape(*block_);
   grad_op_desc_->InferVarType(block_);
@@ -184,7 +163,6 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     for (size_t i = 0; i < outputs.size(); ++i) {
       framework::Variable* orig_grad = origin_outputs[i];
       AddTo(outputs[i], orig_grad);
-      VLOG(3) << "done add to " << grad_op_desc_->Outputs().at(it.first)[i];
     }
   }
 
   return input_vars_;
......
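With the debug VLOGs gone, AddTo is just element-wise gradient accumulation guarded by a size check, and ApplyGrad's final loop feeds each freshly computed output gradient into it. A minimal sketch of that accumulation step, with std::vector<float> standing in for LoDTensor (names are illustrative, not Paddle's API):

// Minimal sketch of the dst += src accumulation that AddTo performs.
#include <cassert>
#include <cstdio>
#include <vector>

void AddToLike(const std::vector<float>& src, std::vector<float>* dst) {
  // Same role as the PADDLE_ENFORCE(numel == numel) check in AddTo.
  assert(src.size() == dst->size() && "gradient size mismatch");
  for (size_t i = 0; i < src.size(); ++i) {
    (*dst)[i] += src[i];
  }
}

int main() {
  std::vector<float> accumulated = {0.5f, 1.0f, -2.0f};  // existing grads_
  std::vector<float> fresh = {0.1f, 0.2f, 0.3f};         // newly computed grad
  AddToLike(fresh, &accumulated);
  for (float v : accumulated) std::printf("%.1f\n", v);  // 0.6 1.2 -1.7
  return 0;
}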
paddle/fluid/imperative/tracer.h

@@ -20,7 +20,6 @@
 #include "paddle/fluid/framework/op_desc.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/engine.h"
 #include "paddle/fluid/imperative/layer.h"
@@ -53,19 +52,14 @@ class Tracer {
  public:
  explicit Tracer(framework::BlockDesc* root_block,
                  framework::BlockDesc* startup_block)
-      : root_block_(root_block), startup_block_(startup_block) {
-    root_scope_ = new framework::Scope();
-    scopes_[root_block_] = root_scope_;
-    scopes_[startup_block_] = root_scope_;
-  }
+      : root_block_(root_block), startup_block_(startup_block) {}
 
-  virtual ~Tracer() { delete root_scope_; }
+  virtual ~Tracer() {}
 
   void Trace(OpBase* op,
             const std::map<std::string, std::vector<VarBase*>>& inputs,
             const std::map<std::string, std::vector<VarBase*>>& outputs,
             framework::BlockDesc* block) {
-    // framework::Scope* scope = GetScope(block);
     std::map<std::string, VarBase*> vars;
 
     framework::OpDesc* op_desc = op->op_desc_;
@@ -94,8 +88,7 @@ class Tracer {
          (*op->pre_ops_)[it.first].push_back(nullptr);
        }
        VLOG(3) << "input vname " << inp->var_desc_->Name() << " "
-                << inp->var_->Get<framework::LoDTensor>().dims().size()
-                << reinterpret_cast<void*>(inp->var_);
+                << inp->var_->IsInitialized();
      }
    }
@@ -119,8 +112,6 @@ class Tracer {
        out->pre_op_out_idx_ = i;
 
        VLOG(3) << "output vname " << out->var_desc_->Name() << " "
-                << out->var_->Get<framework::LoDTensor>().dims().size() << " "
-                << reinterpret_cast<void*>(out->var_) << " "
                << out->var_->IsInitialized();
      }
    }
@@ -167,7 +158,6 @@ class Tracer {
        if (!var->grads_->IsInitialized()) {
          InitVar(var->var_, var->grads_);
        }
-        LOG(ERROR) << grad_outvar << " map to " << var->var_desc_->Name();
        grad_out_vars.push_back(var->grads_);
      }
    }
@@ -175,22 +165,9 @@ class Tracer {
    op->block_ = block;
  }
 
-  framework::Scope* GetScope(framework::BlockDesc* block) {
-    if (scopes_.find(block) != scopes_.end()) {
-      return scopes_.at(block);
-    }
-    framework::BlockDesc* parent_block = block->ParentBlock();
-    PADDLE_ENFORCE(scopes_.find(parent_block) != scopes_.end());
-    framework::Scope* scope = &scopes_[parent_block]->NewScope();
-    scopes_[block] = scope;
-    return scope;
-  }
-
  private:
-  std::map<framework::BlockDesc*, framework::Scope*> scopes_;
  framework::BlockDesc* root_block_;
  framework::BlockDesc* startup_block_;
-  framework::Scope* root_scope_;
 };
 
 }  // namespace imperative
......
paddle/fluid/operators/fill_constant_op.cc

@@ -69,8 +69,8 @@ class FillConstantOp : public framework::OperatorBase {
     math::set_constant(dev_ctx, tensor, value);
   }
 
-  void RunImpl(const framework::RuntimeContext &ctx,
-               const platform::Place &dev_place) const override {
+  void RunImplPrepared(const framework::RuntimeContext &ctx,
+                       const platform::Place &dev_place) const override {
     auto data_type =
         static_cast<framework::proto::VarType::Type>(Attr<int>("dtype"));
     auto value = Attr<float>("value");
......
paddle/fluid/pybind/imperative.cc

@@ -28,9 +28,7 @@ void BindTracer(pybind11::module *m) {
              framework::BlockDesc *startup_block) {
             new (&self) imperative::Tracer(root_block, startup_block);
           })
-      .def("trace", &imperative::Tracer::Trace)
-      .def("get_scope", &imperative::Tracer::GetScope,
-           pybind11::return_value_policy::reference);
+      .def("trace", &imperative::Tracer::Trace);
 }
 
 }  // namespace pybind
......
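On the Python-binding side only trace remains exposed; the get_scope binding (and its return_value_policy::reference) goes away along with the tracer's scope machinery. For reference, a toy, self-contained pybind11 module in the same style; ToyTracer and the module name are illustrative, not Paddle's actual API.

// Toy pybind11 binding: a tracer-like class that only exposes a trace() method.
#include <pybind11/pybind11.h>
#include <string>

namespace py = pybind11;

class ToyTracer {
 public:
  void Trace(const std::string& op_type) { last_op_ = op_type; }
  const std::string& LastOp() const { return last_op_; }

 private:
  std::string last_op_;
};

PYBIND11_MODULE(toy_imperative, m) {
  py::class_<ToyTracer>(m, "Tracer")
      .def(py::init<>())
      .def("trace", &ToyTracer::Trace)
      .def("last_op", &ToyTracer::LastOp);
}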
python/paddle/fluid/layers/nn.py

@@ -20,7 +20,6 @@ from __future__ import print_function
 import numpy as np
 import six
 import os
-import sys
 import inspect
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
@@ -9683,7 +9682,6 @@ class FC(layers.PyLayer):
             shape=param_shape,
             dtype=self._dtype,
             is_bias=False)
-        sys.stderr.write('created w: %s\n' % self._w.name)
 
     def forward(self, inputs):
         tmp = self._helper.create_variable_for_type_inference(self._dtype)
......