Commit b80fe826 authored by Xin Pan

polish

test=develop
Parent 93c16d96
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 syntax = "proto2";
-// option optimize_for = LITE_RUNTIME;
+option optimize_for = LITE_RUNTIME;
 package paddle.framework.proto;
 // Any incompatible changes to ProgramDesc and its dependencies should
......
@@ -43,24 +43,31 @@ void CreateGradOp(const framework::OpDesc& op_desc,
 class Tracer {
  public:
-  Tracer() {}
+  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
+    root_scope_ = new framework::Scope();
+    scopes_[root_block_] = root_scope_;
+  }
+
+  virtual ~Tracer() { delete root_scope_; }
 
   void Trace(OpBase* op, const std::vector<VarBase*>& inputs,
-             const std::vector<VarBase*>& outputs) {
+             const std::vector<VarBase*>& outputs,
+             framework::BlockDesc* block) {
+    framework::Scope* scope = GetScope(block);
     framework::OpDesc* op_desc = op->op_desc_;
     LOG(ERROR) << "tracer tracing " << op_desc->Type();
-    op_desc->InferShape(*block_);
-    op_desc->InferVarType(block_);
+    op_desc->InferShape(*block);
+    op_desc->InferVarType(block);
     std::unique_ptr<framework::OperatorBase> op_base =
         framework::OpRegistry::CreateOp(*op_desc);
 
     *op->input_vars_ = inputs;
     for (VarBase* input : inputs) {
       const std::string vname = input->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       input->var_ = var;
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -78,9 +85,9 @@ class Tracer {
     *op->output_vars_ = outputs;
     for (size_t i = 0; i < outputs.size(); ++i) {
       const std::string vname = outputs[i]->var_desc_->Name();
-      framework::Variable* var = scope_->Var(vname);
+      framework::Variable* var = scope->Var(vname);
       if (!var->IsInitialized()) {
-        framework::VarDesc* var_desc = block_->FindVar(vname);
+        framework::VarDesc* var_desc = block->FindVar(vname);
         if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
           var->GetMutable<framework::LoDTensor>();
         } else {
@@ -91,28 +98,30 @@ class Tracer {
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
     }
-    op_base->Run(*scope_, platform::CPUPlace());
+    op_base->Run(*scope, platform::CPUPlace());
     framework::OpDesc* grad_op_desc;
     auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block_}, &grad_op_desc, grad_to_var);
+    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
     op->grad_op_desc_ = grad_op_desc;
     op->grad_to_var_ = grad_to_var;
-    op->block_ = block_;
+    op->block_ = block;
   }
 
-  void SetScope(framework::Scope* scope) { scope_ = scope; }
-  void SetBlock(framework::BlockDesc* block) { block_ = block; }
-  framework::Scope* Scope() const { return scope_; }
-  framework::BlockDesc* Block() const { return block_; }
+  framework::Scope* GetScope(framework::BlockDesc* block) {
+    if (scopes_.find(block) != scopes_.end()) {
+      return scopes_.at(block);
+    }
+    framework::BlockDesc* parent_block = block->ParentBlock();
+    PADDLE_ENFORCE(scopes_.find(parent_block) != scopes_.end());
+    framework::Scope* scope = &scopes_[parent_block]->NewScope();
+    scopes_[block] = scope;
+    return scope;
+  }
 
  private:
-  framework::BlockDesc* block_;
-  framework::Scope* scope_;
-  std::vector<Runnable*> runnables_;
+  std::map<framework::BlockDesc*, framework::Scope*> scopes_;
+  framework::BlockDesc* root_block_;
+  framework::Scope* root_scope_;
 };
 
 }  // namespace imperative
......
@@ -23,20 +23,13 @@ namespace pybind {
 // Bind Methods
 void BindTracer(pybind11::module *m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
-      .def(pybind11::init<>())
+      .def("__init__",
+           [](imperative::Tracer &self, framework::BlockDesc *root_block) {
+             new (&self) imperative::Tracer(root_block);
+           })
       .def("trace", &imperative::Tracer::Trace)
-      .def_property("scope",
-                    [](const imperative::Tracer &self) { return self.Scope(); },
-                    [](imperative::Tracer &self, framework::Scope *scope) {
-                      self.SetScope(scope);
-                    },
-                    R"DOC()DOC")
-      .def_property("block",
-                    [](const imperative::Tracer &self) { return self.Block(); },
-                    [](imperative::Tracer &self, framework::BlockDesc *block) {
-                      self.SetBlock(block);
-                    },
-                    R"DOC()DOC");
+      .def("get_scope", &imperative::Tracer::GetScope,
+           pybind11::return_value_policy::reference);
 }
 
 }  // namespace pybind
......
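For reference, a minimal sketch of how Python code is expected to drive the rebound Tracer, mirroring the framework.py changes further down (the program and variable names here are illustrative only):

from paddle.fluid import core
from paddle.fluid import framework

prog = framework.Program()
root_block = prog.current_block()

tracer = core.Tracer(root_block.desc)        # new __init__ binding takes the root BlockDesc
scope = tracer.get_scope(root_block.desc)    # per-block scope lookup, returned by reference
# Operators appended in imperative mode are forwarded to the tracer as:
#     tracer.trace(op, op.inputs, op.outputs, root_block.desc)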
@@ -358,11 +358,13 @@ class Variable(core.VarBase):
         self.stop_gradient = stop_gradient
         self.is_data = is_data
 
-    def numpy(self, scope):
+    def numpy(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         tensor = core.get_variable_tensor(scope, self.desc.name())
         return np.array(tensor)
 
-    def backward(self, scope):
+    def backward(self):
+        scope = _imperative_tracer().get_scope(self.block.desc)
         self._run_backward(scope)
 
     def grad(self):
@@ -668,14 +670,14 @@ class Operator(core.OpBase):
             for inp in inputs.values():
                 if isinstance(inp, Variable):
                     input_vars.append(inp)
-                elif isinstance(inp, list):
+                elif isinstance(inp, list) or isinstance(inp, tuple):
                     input_vars.extend(inp[:])
             self.inputs = input_vars
 
             output_vars = []
             for out in outputs.values():
                 if isinstance(out, Variable):
                     output_vars.append(out)
-                elif isinstance(inp, list):
+                elif isinstance(out, list) or isinstance(out, tuple):
                     output_vars.extend(out[:])
             self.outputs = output_vars
@@ -1246,7 +1248,7 @@ class Block(object):
         if _in_imperative_mode():
             op_desc = core.OpDesc()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
-            _imperative_tracer().trace(op, op.inputs, op.outputs)
+            _imperative_tracer().trace(op, op.inputs, op.outputs, self.desc)
         else:
             op_desc = self.desc.append_op()
             op = Operator(block=self, desc=op_desc, *args, **kwargs)
@@ -2257,9 +2259,9 @@ def _get_var(name, program=None):
 @contextlib.contextmanager
-def _imperative_guard():
+def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_
-    _imperative_tracer_ = core.Tracer()
+    _imperative_tracer_ = tracer
 
     yield
 
     _imperative_tracer_ = tmp_trace
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import contextlib
+import numpy as np
 from paddle.fluid import core
 from paddle.fluid import framework
 
-__all__ = ['enabled', 'guard']
+__all__ = ['enabled', 'guard', 'to_variable']
 
 
 def enabled():
@@ -26,8 +28,29 @@ def enabled():
 def guard():
     train = framework.Program()
     startup = framework.Program()
+    tracer = core.Tracer(train.current_block().desc)
     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
-            with framework._imperative_guard():
+            with framework._imperative_guard(tracer):
                 yield
+    # TODO: check train, startup not changed.
+
+
+def to_variable(value, block=None):
+    if isinstance(value, np.ndarray):
+        if not block:
+            block = framework.default_main_program().current_block()
+        py_var = framework.Variable(
+            block,
+            type=core.VarDesc.VarType.LOD_TENSOR,
+            name=None,
+            shape=value.shape,
+            dtype=value.dtype)
+        scope = framework._imperative_tracer().get_scope(block.desc)
+        var = scope.var(py_var.name)
+        tensor = var.get_tensor()
+        tensor.set(value, core.CPUPlace())
+        return py_var
+    elif isinstance(value, framework.Variable):
+        return value
+    else:
+        raise ValueError("Unsupported type %s" % type(value))
@@ -18,51 +18,32 @@ import numpy as np
 from paddle.fluid import core
 from paddle.fluid import framework
+from paddle.fluid.imperative import base
 
 __all__ = ['PyLayer']
 
 
-@contextlib.contextmanager
-def trace_scope(scope, block):
-    tmp_scope = framework._imperative_tracer().scope
-    tmp_block = framework._imperative_tracer().block
-    framework._imperative_tracer().scope = scope
-    framework._imperative_tracer().block = block
-    yield
-    framework._imperative_tracer().scope = tmp_scope
-    framework._imperative_tracer().block = tmp_block
-
-
 class PyLayer(core.Layer):
     def __init__(self):
-        self._scope = core.Scope()
-        self._block = framework.default_main_program().current_block()
+        pass
 
     def __call__(self, inputs):
-        with trace_scope(self._scope, self._block.desc):
-            if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-                inputs = [inputs]
-
-            var_inputs = []
-            for x in inputs:
-                if isinstance(x, np.ndarray):
-                    py_var = framework.Variable(
-                        self._block,
-                        type=core.VarDesc.VarType.LOD_TENSOR,
-                        name=None,
-                        shape=x.shape,
-                        dtype=x.dtype)
-                    var = self._scope.var(py_var.name)
-                    tensor = var.get_tensor()
-                    tensor.set(x, core.CPUPlace())
-                    var_inputs.append(py_var)
-                elif isinstance(x, framework.Variable):
-                    var_inputs.append(x)
-                else:
-                    raise ValueError("not var or ndarray %s" % type(x))
-            outputs = self.forward(var_inputs)
-            return outputs
+        # TODO(panyx0718): Support declarative mode as well.
+        assert base.enabled()
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+
+        var_inputs = []
+        for x in inputs:
+            if isinstance(x, np.ndarray):
+                py_var = base.to_variable(x)
+                var_inputs.append(py_var)
+            elif isinstance(x, framework.Variable):
+                var_inputs.append(x)
+            else:
+                raise ValueError("not var or ndarray %s" % type(x))
+        outputs = self.forward(var_inputs)
+        return outputs
 
     def forward(self, inputs):
         print("at python.")
         return []
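For reference, a sketch of a PyLayer subclass under the new calling convention, modeled on the updated unit test: __call__ now asserts imperative mode and converts ndarray inputs via base.to_variable, so forward() only ever sees Variables. DoubledRelu is a made-up name, and the availability of these layers in imperative mode at this commit is assumed rather than guaranteed:

import numpy as np
import paddle.fluid as fluid

class DoubledRelu(fluid.imperative.PyLayer):
    def __init__(self):
        super(DoubledRelu, self).__init__()

    def forward(self, inputs):
        # inputs are already Variables; ndarray conversion happened in __call__.
        x = fluid.layers.relu(inputs[0])
        return [fluid.layers.elementwise_mul(x, x)]

with fluid.imperative.guard():
    out = DoubledRelu()(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
    print(out.numpy())
    out.backward()  # gradients flow through the ops recorded by the tracer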
@@ -23,6 +23,7 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
+from paddle.fluid.imperative import base
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
 from six.moves import zip
@@ -62,7 +63,7 @@ class LayerHelper(object):
         if isinstance(x, Variable):
             return x
         elif isinstance(x, np.ndarray):
-            return self._np_to_variable(x)
+            return base.to_variable(x, self.main_program.current_block())
         else:
             raise ValueError("inputs wrong type %s\n" % x)
......
@@ -17,7 +17,6 @@ All layers just related to the neural network.
 from __future__ import print_function
 
 import sys
 import numpy as np
-import os
 from ..layer_helper import LayerHelper
@@ -43,8 +43,8 @@ class TestImperative(unittest.TestCase):
             l = MyLayer()
             x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
             self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
-            x.backward(l._scope)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy()))
+            x.backward()
             sys.stderr.write("grad %s\n" % l._x_for_debug.grad())
......
@@ -101,6 +101,7 @@ packages=['paddle',
           'paddle.dataset',
           'paddle.reader',
           'paddle.fluid',
+          'paddle.fluid.imperative',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.layers',
......