Commit 9597fd05 authored by Xin Pan

polish

test=develop
Parent 7aad6afd
@@ -128,26 +128,23 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
return {};
}
std::vector<std::unique_ptr<framework::Variable>> tmp_vars;
std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
if (backward_id_ > 0) {
VLOG(3) << "py_layer_grad";
grad_outputs["Out@GRAD"] =
PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
} else {
VLOG(3) << "op grad " << grad_op_desc_->Type();
for (auto it : grad_output_vars_) {
auto& outputs = grad_outputs[it.first];
for (size_t i = 0; i < it.second.size(); ++i) {
// Allocate a new variable
Variable* tmp_var = new framework::Variable();
tmp_var->GetMutable<framework::LoDTensor>();
tmp_vars.emplace_back(tmp_var);
outputs.push_back(tmp_var);
}
}
if (backward_id_ > 0) {
VLOG(3) << "py_layer_grad";
PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"],
&(grad_outputs["Out@GRAD"]));
} else {
VLOG(3) << "op grad " << grad_op_desc_->Type();
framework::RuntimeContext ctx(grad_input_vars_, grad_outputs);
// No need to do compile time infer shape here.
@@ -170,10 +167,13 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
for (auto it : grad_output_vars_) {
auto& outputs = grad_outputs[it.first];
auto& origin_outputs = it.second;
PADDLE_ENFORCE_EQ(outputs.size(), origin_outputs.size());
for (size_t i = 0; i < outputs.size(); ++i) {
framework::Variable* grad = outputs[i];
framework::Variable* orig_grad = origin_outputs[i];
AddTo(outputs[i], orig_grad);
AddTo(grad, orig_grad);
delete grad;
}
}
return input_vars_;
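The ApplyGrad hunks above allocate a fresh Variable for every gradient output slot, run either the Python backward (`backward_id_`) or the C++ grad op into those buffers, accumulate each result into the long-lived gradient variable with AddTo, and then delete the temporary. A minimal standalone sketch of that allocate-accumulate-release pattern, using simplified stand-in types rather than Paddle's framework::Variable:

```cpp
// Sketch of the accumulate-then-release pattern above.
// `Buffer` and `AddTo` are simplified stand-ins, not Paddle's types.
#include <cstddef>
#include <vector>

struct Buffer {
  std::vector<float> data;
};

// Element-wise accumulation of `src` into `dst` (stand-in for AddTo).
void AddTo(const Buffer* src, Buffer* dst) {
  if (dst->data.size() < src->data.size()) {
    dst->data.resize(src->data.size(), 0.0f);
  }
  for (size_t i = 0; i < src->data.size(); ++i) {
    dst->data[i] += src->data[i];
  }
}

// `new_grads` were allocated only to receive this op's gradient outputs;
// after accumulating into the persistent `origin_grads` they are released.
void AccumulateAndRelease(const std::vector<Buffer*>& new_grads,
                          const std::vector<Buffer*>& origin_grads) {
  for (size_t i = 0; i < new_grads.size(); ++i) {
    AddTo(new_grads[i], origin_grads[i]);
    delete new_grads[i];
  }
}

int main() {
  Buffer origin;  // persistent gradient storage
  std::vector<Buffer*> temps = {new Buffer{{1.f, 2.f}}, new Buffer{{3.f, 4.f}}};
  std::vector<Buffer*> origins = {&origin, &origin};  // accumulate twice
  AccumulateAndRelease(temps, origins);
  return origin.data[0] == 4.f ? 0 : 1;  // 1 + 3
}
```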
@@ -197,30 +197,60 @@ void PyLayer::RegisterFunc(int func_id, const py::object& py_func) {
py_funcs_[func_id] = py_func;
}
int PyLayer::NumFuncs() { return py_funcs_.size(); }
std::vector<VarBase*> PyLayer::Apply(int func_id,
const std::vector<VarBase*>& inputs) {
std::vector<framework::LoDTensor> tensor_inputs;
std::vector<VarBase*> ret;
std::vector<framework::Variable*> invars;
for (const VarBase* in : inputs) {
tensor_inputs.push_back(in->var_->Get<framework::LoDTensor>());
invars.push_back(in->var_);
}
PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
CallPythonFunc(py_funcs_[func_id], tensor_inputs, &ret);
std::vector<Variable*> outvars = CallPythonFunc(py_funcs_[func_id], invars);
std::vector<VarBase*> ret;
for (Variable* v : outvars) {
ret.push_back(new VarBase(v, new Variable()));
}
return ret;
}
void PyLayer::ApplyGrad(int func_id,
const std::vector<framework::Variable*>& inputs,
std::vector<framework::Variable*>* outputs) {
std::vector<framework::LoDTensor> tensor_inputs;
std::vector<VarBase*> ret;
std::vector<Variable*> PyLayer::ApplyGrad(
int func_id, const std::vector<framework::Variable*>& inputs) {
PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
return CallPythonFunc(py_funcs_[func_id], inputs);
}
for (const Variable* in : inputs) {
tensor_inputs.push_back(in->Get<framework::LoDTensor>());
std::vector<framework::Variable*> PyLayer::CallPythonFunc(
const py::object& callable, const std::vector<framework::Variable*>& ins) {
py::gil_scoped_acquire guard;
py::tuple in_args(ins.size());
for (size_t i = 0; i < ins.size(); ++i) {
const framework::LoDTensor& t = ins[i]->Get<framework::LoDTensor>();
in_args[i] = t.IsInitialized() ? py::cast(t) : py::cast(nullptr);
}
PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
CallPythonFunc(py_funcs_[func_id], tensor_inputs, outputs);
VLOG(3) << "pyfunc in " << py::len(in_args);
// TODO(panyx0718): Who owns the returned LoDTensor.
auto ret = callable(in_args);
auto ret_tuple = py::cast<py::tuple>(ret);
size_t ret_num = py::len(ret_tuple);
std::vector<framework::Variable*> outs;
VLOG(3) << "pyfunc out " << ret_num;
for (size_t i = 0; i < ret_num; ++i) {
try {
auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
"Output tensor %d should not be nullptr", i);
auto* var = new framework::Variable();
auto* tensor = var->GetMutable<framework::LoDTensor>();
tensor->ShareDataWith(*py_out_tensor);
tensor->set_lod(py_out_tensor->lod());
outs.push_back(var);
} catch (py::cast_error&) {
PADDLE_THROW("The %d-th output must be LoDTensor", i);
}
}
return outs;
}
} // namespace imperative
......
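CallPythonFunc above follows a common pybind11 pattern: acquire the GIL, pack the input tensors into a py::tuple, call the registered Python function with that tuple as its single argument, then cast the returned tuple back element by element into newly allocated Variables. A self-contained sketch of the same call pattern, using plain doubles in place of LoDTensor and a made-up lambda as the callable:

```cpp
// Self-contained pybind11 sketch of the call pattern used by CallPythonFunc.
// Plain doubles stand in for LoDTensor; the lambda is a made-up callable.
#include <pybind11/embed.h>
#include <vector>

namespace py = pybind11;

std::vector<double> CallPyCallable(const py::object& callable,
                                   const std::vector<double>& ins) {
  py::gil_scoped_acquire guard;  // calling into Python requires the GIL
  py::tuple in_args(ins.size());
  for (size_t i = 0; i < ins.size(); ++i) {
    in_args[i] = py::cast(ins[i]);
  }
  // The callable receives the whole tuple as one positional argument and
  // must return a tuple of results.
  py::tuple ret_tuple = py::cast<py::tuple>(callable(in_args));
  std::vector<double> outs;
  for (py::handle item : ret_tuple) {
    outs.push_back(py::cast<double>(item));
  }
  return outs;
}

int main() {
  py::scoped_interpreter interp;  // start an embedded interpreter
  py::object square_all = py::eval("lambda xs: tuple(x * x for x in xs)");
  std::vector<double> outs = CallPyCallable(square_all, {1.0, 2.0, 3.0});
  return (outs.size() == 3 && outs[2] == 9.0) ? 0 : 1;
}
```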
@@ -87,12 +87,15 @@ class OpBase;
class VarBase {
public:
VarBase()
VarBase() : VarBase(new framework::Variable(), new framework::Variable()) {}
// Owns `var` and `grad`
VarBase(framework::Variable* var, framework::Variable* grad)
: pre_op_(nullptr),
pre_op_out_idx_(-1),
var_desc_(nullptr),
var_(new framework::Variable()),
grads_(new framework::Variable()),
var_(var),
grads_(grad),
stop_gradient_(false) {}
explicit VarBase(bool stop_gradient)
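The new VarBase constructors use the delegating-constructor idiom: the default constructor forwards to the two-argument one, which takes ownership of the raw Variable pointers it receives (per the "Owns `var` and `grad`" comment). A generic sketch of that idiom with stand-in types; VarBase's actual destructor is outside this hunk, so the cleanup below is only illustrative:

```cpp
// Generic sketch of the delegating-constructor ownership idiom above.
// `Holder` and `Payload` are stand-ins, not VarBase and framework::Variable.
#include <string>

struct Payload {
  std::string data;
};

class Holder {
 public:
  // Default-construct by delegating to the owning constructor.
  Holder() : Holder(new Payload(), new Payload()) {}

  // Takes ownership of both raw pointers.
  Holder(Payload* value, Payload* grad) : value_(value), grad_(grad) {}

  ~Holder() {
    delete value_;
    delete grad_;
  }

  // Owning raw pointers: copying would double-free, so forbid it.
  Holder(const Holder&) = delete;
  Holder& operator=(const Holder&) = delete;

 private:
  Payload* value_;
  Payload* grad_;
};

int main() {
  Holder h;  // owns two heap Payloads; both freed when h is destroyed
  return 0;
}
```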
@@ -131,8 +134,8 @@ class OpBase {
public:
OpBase()
: op_desc_(nullptr),
grad_op_desc_(nullptr),
forward_id_(-1),
grad_op_desc_(nullptr),
backward_id_(-1) {}
virtual ~OpBase() {
@@ -141,10 +144,13 @@ class OpBase {
std::map<std::string, std::vector<VarBase*>> ApplyGrad();
// One of `op_desc_` or `forward_id_` is set, not both.
// For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_.
framework::OpDesc* op_desc_;
framework::OpDesc* grad_op_desc_;
int forward_id_;
// When has backward, one of `grad_op_desc_` or `backward_id_` is set,
// not both.
framework::OpDesc* grad_op_desc_;
int backward_id_;
std::map<std::string, std::vector<VarBase*>> input_vars_;
@@ -167,76 +173,23 @@ class Layer {
}
};
static void CallPythonFunc(const py::object& callable,
const std::vector<framework::LoDTensor>& ins,
std::vector<VarBase*>* outs) {
py::gil_scoped_acquire guard;
py::tuple in_args(ins.size());
for (size_t i = 0; i < ins.size(); ++i) {
in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
}
// TODO(panyx0718): Who owns the returned LoDTensor.
auto ret = callable(in_args);
auto ret_tuple = py::cast<py::tuple>(ret);
size_t ret_num = py::len(ret_tuple);
for (size_t i = 0; i < ret_num; ++i) {
try {
auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
"Output tensor %d should not be nullptr", i);
VarBase* var = new VarBase();
auto* tensor = var->var_->GetMutable<framework::LoDTensor>();
tensor->ShareDataWith(*py_out_tensor);
tensor->set_lod(py_out_tensor->lod());
outs->push_back(var);
} catch (py::cast_error&) {
PADDLE_THROW("The %d-th output must be LoDTensor", i);
}
}
}
static void CallPythonFunc(const py::object& callable,
const std::vector<framework::LoDTensor>& ins,
std::vector<framework::Variable*>* outs) {
py::gil_scoped_acquire guard;
py::tuple in_args(ins.size());
for (size_t i = 0; i < ins.size(); ++i) {
in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
}
VLOG(3) << "pyfunc in " << py::len(in_args);
// TODO(panyx0718): Who owns the returned LoDTensor.
auto ret = callable(in_args);
auto ret_tuple = py::cast<py::tuple>(ret);
size_t ret_num = py::len(ret_tuple);
VLOG(3) << "pyfunc out " << ret_num;
for (size_t i = 0; i < ret_num; ++i) {
try {
auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
"Output tensor %d should not be nullptr", i);
auto* tensor = (*outs)[i]->GetMutable<framework::LoDTensor>();
tensor->ShareDataWith(*py_out_tensor);
tensor->set_lod(py_out_tensor->lod());
} catch (py::cast_error&) {
PADDLE_THROW("The %d-th output must be LoDTensor", i);
}
}
}
class PyLayer {
public:
virtual ~PyLayer() {}
static void RegisterFunc(int func_id, const py::object& py_func);
static int NumFuncs();
static std::vector<VarBase*> Apply(int func_id,
const std::vector<VarBase*>& inputs);
static void ApplyGrad(int func_id,
const std::vector<framework::Variable*>& inputs,
std::vector<framework::Variable*>* outputs);
static std::vector<framework::Variable*> ApplyGrad(
int func_id, const std::vector<framework::Variable*>& inputs);
private:
static std::vector<framework::Variable*> CallPythonFunc(
const py::object& callable, const std::vector<framework::Variable*>& ins);
};
} // namespace imperative
......
@@ -132,8 +132,9 @@ class Tracer {
if (!stop_gradient) {
framework::OpDesc* grad_op_desc;
// TODO(panyx): Is this leaked?
auto grad_to_var = new std::unordered_map<std::string, std::string>();
CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
new std::unordered_map<std::string, std::string>());
CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get());
op->grad_op_desc_ = grad_op_desc;
for (auto it : grad_op_desc->Inputs()) {
......
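The tracer change above replaces a bare `new std::unordered_map` (flagged by the TODO as a possible leak) with a std::unique_ptr and hands the raw pointer to CreateGradOp via .get(). A small generic sketch of that borrow-from-unique_ptr pattern; the function name below is illustrative, not Paddle's CreateGradOp:

```cpp
// Generic sketch: own a temporary map with unique_ptr and lend a raw
// pointer to an API that only fills it in.
#include <memory>
#include <string>
#include <unordered_map>

using StrMap = std::unordered_map<std::string, std::string>;

// Stands in for an API that takes a raw out-parameter it does not own.
void FillGradToVar(StrMap* grad_to_var) {
  (*grad_to_var)["x@GRAD"] = "x";
}

int main() {
  std::unique_ptr<StrMap> grad_to_var(new StrMap());
  FillGradToVar(grad_to_var.get());  // callee borrows, caller still owns
  // The map is released automatically when grad_to_var goes out of scope,
  // which is what the unique_ptr in the hunk above guarantees.
  return grad_to_var->count("x@GRAD") == 1 ? 0 : 1;
}
```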
@@ -191,7 +191,7 @@ PYBIND11_MODULE(core, m) {
return self.Forward(inputs);
});
py::class_<paddle::imperative::PyLayer>(m, "PyLayer")
py::class_<imperative::PyLayer>(m, "PyLayer")
.def(py::init<>())
.def_static(
"apply",
@@ -200,9 +200,11 @@ PYBIND11_MODULE(core, m) {
return imperative::PyLayer::Apply(func_id, inputs);
},
py::return_value_policy::take_ownership)
.def_static("register_func", [](int func_id, const py::object &callable) {
.def_static("register_func",
[](int func_id, const py::object &callable) {
imperative::PyLayer::RegisterFunc(func_id, callable);
});
})
.def_static("num_funcs", &imperative::PyLayer::NumFuncs);
BindTracer(&m);
......
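The pybind section above exposes PyLayer's static methods through def_static, mixing a lambda (register_func) with a direct function pointer (num_funcs). A minimal standalone module sketch of the same binding shape, using a hypothetical Registry class rather than Paddle's imperative::PyLayer:

```cpp
// Minimal pybind11 sketch mirroring the def_static pattern above.
// `Registry` is a hypothetical class, not Paddle's imperative::PyLayer.
#include <pybind11/pybind11.h>
#include <map>

namespace py = pybind11;

class Registry {
 public:
  static void RegisterFunc(int func_id, const py::object& callable) {
    funcs()[func_id] = callable;
  }
  static int NumFuncs() { return static_cast<int>(funcs().size()); }

 private:
  static std::map<int, py::object>& funcs() {
    static std::map<int, py::object> instance;
    return instance;
  }
};

PYBIND11_MODULE(registry_demo, m) {
  py::class_<Registry>(m, "Registry")
      .def(py::init<>())
      .def_static("register_func",
                  [](int func_id, const py::object& callable) {
                    Registry::RegisterFunc(func_id, callable);
                  })
      .def_static("num_funcs", &Registry::NumFuncs);
}
```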
@@ -68,12 +68,15 @@ class PyLayer(core.PyLayer):
block = framework.default_main_program().current_block()
inputs = [x._ivar for x in inputs]
PyLayer.register_func(1, cls.forward)
PyLayer.register_func(2, cls.backward)
if not hasattr(cls, 'forward_id'):
cls.forward_id = core.PyLayer.num_funcs() + 1
PyLayer.register_func(cls.forward_id, cls.forward)
cls.backward_id = core.PyLayer.num_funcs() + 1
PyLayer.register_func(cls.backward_id, cls.backward)
iop = core.OpBase()
iop.forward_id = 1
iop.backward_id = 2
iop.forward_id = cls.forward_id
iop.backward_id = cls.backward_id
block.ops.append(iop)
ivars = tracer.py_trace(iop, inputs, False)
# ivars = core.PyLayer.apply(cls.forward, inputs)
......
@@ -81,14 +81,52 @@ class MLP(fluid.imperative.Layer):
class TestImperative(unittest.TestCase):
"""
def test_layer(self):
with fluid.imperative.guard():
cl = core.Layer()
cl.forward([])
l = fluid.imperative.Layer()
self.assertRaises(NotImplementedError, l.forward, [])
"""
def test_pylayer_func_id(self):
with fluid.imperative.guard():
class PyLayer1(fluid.imperative.PyLayer):
def __init__(self):
super(PyLayer1, self).__init__()
@staticmethod
def forward(inputs):
return inputs
@staticmethod
def backward(inputs):
return inputs
class PyLayer2(fluid.imperative.PyLayer):
def __init__(self):
super(PyLayer2, self).__init__()
@staticmethod
def forward(inputs):
return inputs
@staticmethod
def backward(inputs):
return inputs
py_layer_1 = PyLayer1()
py_layer_2 = PyLayer2()
py_layer_1([fluid.imperative.base.to_variable(np.ones([2, 2]))])
py_layer_2([fluid.imperative.base.to_variable(np.ones([2, 2]))])
id = py_layer_1.forward_id
self.assertGreater(id, 0)
self.assertEqual(py_layer_1.backward_id, id + 1)
self.assertEqual(py_layer_2.forward_id, id + 2)
self.assertEqual(py_layer_2.backward_id, id + 3)
py_layer_1([fluid.imperative.base.to_variable(np.ones([2, 2]))])
self.assertEqual(py_layer_1.forward_id, id)
def test_pylayer(self):
np_inp = np.ones([2, 2], np.float32)
@@ -118,7 +156,6 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.allclose(dy_out, static_out))
self.assertTrue(np.allclose(dy_grad, static_grad))
"""
def test_layer_in_out(self):
np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
with fluid.imperative.guard():
@@ -172,7 +209,6 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.allclose(dy_out, static_out))
self.assertTrue(np.allclose(dy_grad, static_grad))
"""
if __name__ == '__main__':
......