Commit 0d5819eb authored by Xin Pan

polish imperative codes

test=develop
Parent e33427da
...
@@ -131,8 +131,9 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
   if (backward_id_ > 0) {
     VLOG(3) << "py_layer_grad";
-    grad_outputs["Out@GRAD"] =
-        PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
+    grad_outputs[framework::GradVarName(PyLayer::kFwdOut)] = PyLayer::ApplyGrad(
+        backward_id_,
+        grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)]);
   } else {
     VLOG(3) << "op grad " << grad_op_desc_->Type();
     for (auto it : grad_output_vars_) {
...
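For context on the constant swap above: framework::GradVarName appends the "@GRAD" suffix to a variable name, so the refactored keys resolve to the same strings the old literals spelled out. A minimal Python sketch of that naming convention (illustrative only; GRAD_SUFFIX and grad_var_name are invented stand-ins, not paddle's Python API):

GRAD_SUFFIX = "@GRAD"  # assumed to mirror the C++ gradient-name suffix

def grad_var_name(name):
    # Same convention as framework::GradVarName(name) on the C++ side.
    return name + GRAD_SUFFIX

# With PyLayer::kFwdInp == "X" and PyLayer::kFwdOut == "Out" (see the PyLayer
# header hunk below), the new keys match the old hard-coded literals:
assert grad_var_name("Out") == "Out@GRAD"
assert grad_var_name("X") == "X@GRAD"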
...
@@ -200,6 +200,9 @@ class PyLayer {
  public:
   virtual ~PyLayer() {}

+  static constexpr char* kFwdInp = "X";
+  static constexpr char* kFwdOut = "Out";
+
   static void RegisterFunc(int func_id, const py::object& py_func);

   static int NumFuncs();
...
...
@@ -48,6 +48,7 @@ class Tracer {
  std::vector<VarBase*> PyTrace(OpBase* op, const std::vector<VarBase*>& inputs,
                                bool stop_gradient = false);

 private:
  framework::BlockDesc* root_block_;
};
...
...
@@ -54,6 +54,25 @@ class PyLayer(core.PyLayer):
     def __init__(self):
         super(PyLayer, self).__init__()

+    @classmethod
+    def _do_forward(cls, inputs):
+        return cls._to_tuple(cls.forward(inputs))
+
+    @classmethod
+    def _do_backward(cls, inputs):
+        return cls._to_tuple(cls.backward(inputs))
+
+    @staticmethod
+    def _to_tuple(inputs):
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+        ret = []
+        for inp in inputs:
+            tensor = core.LoDTensor()
+            tensor.set(inp, core.CPUPlace())
+            ret.append(tensor)
+        return tuple(ret)
+
     @staticmethod
     def forward(*inputs):
         raise NotImplementedError
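The helpers added above let a user-defined PyLayer return plain numpy arrays: _do_forward/_do_backward funnel whatever forward/backward returns through _to_tuple, which boxes the result into a tuple of LoDTensors for the C++ side. A rough sketch of that normalization, assuming a built paddle.fluid environment (the array values are made up, and user code would not normally call _to_tuple directly):

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core

arr = np.ones([2, 2], dtype='float32')

# A single ndarray and a list of ndarrays both come back as tuples of LoDTensors.
single = fluid.imperative.PyLayer._to_tuple(arr)
many = fluid.imperative.PyLayer._to_tuple([arr, arr * 2])

assert isinstance(single, tuple) and len(single) == 1
assert isinstance(single[0], core.LoDTensor)
assert len(many) == 2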
...
@@ -70,16 +89,15 @@ class PyLayer(core.PyLayer):

         if not hasattr(cls, 'forward_id'):
             cls.forward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.forward_id, cls.forward)
+            PyLayer.register_func(cls.forward_id, cls._do_forward)
             cls.backward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.backward_id, cls.backward)
+            PyLayer.register_func(cls.backward_id, cls._do_backward)

         iop = core.OpBase()
         iop.forward_id = cls.forward_id
         iop.backward_id = cls.backward_id
         block.ops.append(iop)
         ivars = tracer.py_trace(iop, ivar_inputs, False)
-        # ivars = core.PyLayer.apply(cls.forward, inputs)
         ret = []
         for ivar in ivars:
             tensor = ivar.value().get_tensor()
...
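The hunk above now registers the wrapper classmethods instead of the raw user callbacks, and does so lazily, once per subclass. A framework-free toy sketch of that id-based registration pattern (the names here are invented for illustration; in the real code the registry lives behind core.PyLayer.register_func / core.PyLayer.num_funcs):

_funcs = {}

def register_func(func_id, fn):
    _funcs[func_id] = fn

def num_funcs():
    return len(_funcs)

class ToyPyLayer(object):
    @classmethod
    def _do_forward(cls, inputs):
        return inputs  # stand-in for cls._to_tuple(cls.forward(inputs))

    @classmethod
    def _do_backward(cls, inputs):
        return inputs

    @classmethod
    def ensure_registered(cls):
        # Register each subclass's wrappers exactly once; the ids are later
        # stashed on the op object so the runtime can look the callables up.
        if not hasattr(cls, 'forward_id'):
            cls.forward_id = num_funcs() + 1
            register_func(cls.forward_id, cls._do_forward)
            cls.backward_id = num_funcs() + 1
            register_func(cls.backward_id, cls._do_backward)

ToyPyLayer.ensure_registered()
assert (ToyPyLayer.forward_id, ToyPyLayer.backward_id) == (1, 2)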
...
@@ -41,26 +41,12 @@ class MyPyLayer(fluid.imperative.PyLayer):

     @staticmethod
     def forward(inputs):
-        sys.stderr.write('before forward\n')
-        ret = np.tanh(inputs[0])
-        sys.stderr.write('after forward: %s\n' % ret)
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.tanh(inputs[0])

     @staticmethod
     def backward(inputs):
-        sys.stderr.write('calling into backward: %s\n' % str(inputs))
         inp, out, dout = inputs
-        inp = np.array(inp)
-        out = np.array(out)
-        dout = np.array(dout)
-        sys.stderr.write('calling into backward: %s, %s, %s\n' %
-                         (inp, out, dout))
-        ret = np.array(dout) * (1 - np.square(np.array(out)))
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.array(dout) * (1 - np.square(np.array(out)))


 class MLP(fluid.imperative.Layer):
...
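Since the test's backward now just returns dout * (1 - tanh(x)^2), here is a quick numpy-only sanity check of that formula against a numerical derivative (illustrative only; not part of the test file):

import numpy as np

x = np.random.rand(2, 2)
out = np.tanh(x)
dout = np.ones_like(out)                 # pretend upstream gradient

analytic = dout * (1 - np.square(out))   # what MyPyLayer.backward computes
eps = 1e-4
numeric = (np.tanh(x + eps) - np.tanh(x - eps)) / (2.0 * eps)
assert np.allclose(analytic, numeric, atol=1e-6)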