Commit 52e5ee60 authored by: M minqiyang

Add debug info

Parent 7e7b4500
@@ -175,7 +175,7 @@ std::unique_ptr<VarBase> VarBase::NewVarBase(const platform::Place& dst_place,
   PADDLE_ENFORCE(var_->IsInitialized(),
                  "Variable must be initialized when getting numpy tensor");
-  std::unique_ptr<VarBase> new_var(new VarBase());
+  std::unique_ptr<VarBase> new_var(new VarBase("NewVarBase"));
   framework::LoDTensor* tensor =
       new_var->var_->GetMutable<framework::LoDTensor>();
   tensor->Resize(var_->Get<framework::LoDTensor>().dims());
@@ -303,7 +303,7 @@ std::vector<VarBase*> PyLayer::Apply(int func_id,
   std::vector<Variable*> outvars = CallPythonFunc(py_funcs_[func_id], invars);
   std::vector<VarBase*> ret;
   for (Variable* v : outvars) {
-    ret.push_back(new VarBase(v, new VarBase(true)));
+    ret.push_back(new VarBase(v, new VarBase("PYLAYER_XGRAD", true), ""));
   }
   return ret;
 }
......
@@ -103,26 +103,30 @@ class OpBase;
  */
 class VarBase {
  public:
-  VarBase() : VarBase(new framework::Variable(), new VarBase(true)) {}
+  VarBase(std::string name) : VarBase(new framework::Variable(), new VarBase(name + "XGRAD", true), name) {}
 
   // Owns `var` and `grad`
-  VarBase(framework::Variable* var, VarBase* grad)
+  VarBase(framework::Variable* var, VarBase* grad, std::string name)
       : var_desc_(nullptr),
         var_(var),
         grads_(grad),
         stop_gradient_(false),
         pre_op_(nullptr),
-        pre_op_out_idx_(-1) {}
+        pre_op_out_idx_(-1),
+        name_(name) { LOG(ERROR) << "create " << name; }
 
-  explicit VarBase(bool stop_gradient)
+  explicit VarBase(std::string name, bool stop_gradient)
       : var_desc_(nullptr),
         var_(new framework::Variable()),
-        grads_(stop_gradient ? nullptr : new VarBase(true)),
+        grads_(stop_gradient ? nullptr : new VarBase(name + "XGRAD", true)),
         stop_gradient_(stop_gradient),
         pre_op_(nullptr),
-        pre_op_out_idx_(-1) {}
+        pre_op_out_idx_(-1),
+        name_(name) { LOG(ERROR) << "create " << name; }
 
   virtual ~VarBase() {
+    LOG(ERROR) << "delete " << name_;
     if (var_) {
       delete var_;
     }
@@ -183,6 +187,7 @@ class VarBase {
   OpBase* pre_op_;
   std::string pre_op_out_name_;
   int pre_op_out_idx_;
+  std::string name_;
 };
 
 /* The wrapper for OpDesc which holds a OpDesc and a OpDesc of its
......
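The header change above is a lifetime-tracing aid: every VarBase logs its name on construction and destruction, so a gradient holder that is never freed shows up as a "create" with no matching "delete". A minimal Python analogue of the same create/delete tracing idea (the class and variable names here are made up for illustration, not part of this commit):

    import logging

    logging.basicConfig(level=logging.DEBUG)


    class TracedVar(object):
        """Toy stand-in for VarBase: log creation and destruction by name."""

        def __init__(self, name):
            self.name = name
            logging.debug("create %s", name)

        def __del__(self):
            logging.debug("delete %s", self.name)


    v = TracedVar("fc_0.w_0")
    del v  # if "delete fc_0.w_0" never appears, something still holds a reference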
@@ -137,7 +137,7 @@ PYBIND11_MODULE(core, m) {
   py::class_<imperative::VarBase>(m, "VarBase", R"DOC()DOC")
       // .def(py::init<>())
-      .def(py::init<bool>(), py::arg("stop_gradient") = false)
+      .def(py::init<std::string, bool>(), py::arg("stop_gradient") = false, py::arg("name") = "")
       .def("_run_backward",
            [](imperative::VarBase &self) { self.RunBackward(); })
       .def("_grad_name", &imperative::VarBase::GradName)
......
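With the updated binding, the C++ VarBase is constructed from Python with a debug name plus the stop_gradient flag; framework.py below passes both positionally. A rough sketch of the call, assuming a paddle.fluid build that includes this commit (the variable name is illustrative):

    from paddle.fluid import core

    # Positional call matching the new py::init<std::string, bool> signature,
    # mirroring core.VarBase(name, stop_gradient) in framework.py below.
    ivar = core.VarBase("my_temp_var", False)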
@@ -306,6 +306,10 @@ class Variable(object):
         if name is None:
             name = unique_name.generate('_generated_var')
 
+        # print("create var", name)
+        # import sys
+        # sys.stdout.flush()
+
         is_new_var = False
         name = cpt.to_text(name)
         self.desc = self.block.desc.find_var(cpt.to_bytes(name))
@@ -383,7 +387,7 @@ class Variable(object):
         if _in_imperative_mode():
             self._ivar = kwargs.get("ivar", None)
             if not self._ivar:
-                self._ivar = core.VarBase()
+                self._ivar = core.VarBase(name, stop_gradient)
             self._ivar.desc = self.desc
             self._ivar.stop_gradient = stop_gradient
@@ -1269,6 +1273,7 @@ class Block(object):
         return var
 
     def _remove_var(self, name):
-        self._sync_with_cpp()
+        if not _in_imperative_mode():
+            self._sync_with_cpp()
         self.desc._remove_var(cpt.to_bytes(name))
         del self.vars[name]
@@ -1353,6 +1358,7 @@ class Block(object):
         Returns:
             None
         """
-        self._sync_with_cpp()
+        if not _in_imperative_mode():
+            self._sync_with_cpp()
         self.desc._remove_op(index, index + 1)
         del self.ops[index]
......
@@ -101,7 +101,7 @@ class MNIST(fluid.imperative.Layer):
 class TestImperativeMnist(unittest.TestCase):
     def test_mnist_float32(self):
         seed = 90
-        batch_num = 2
+        batch_num = 100000
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -125,85 +125,109 @@ class TestImperativeMnist(unittest.TestCase):
                 label = to_variable(y_data)
                 label._stop_gradient = True
 
+                print("forward start")
                 cost = mnist(img)
                 loss = fluid.layers.cross_entropy(cost, label)
                 avg_loss = fluid.layers.mean(loss)
-                dy_out = avg_loss._numpy()
+                # dy_out = avg_loss._numpy()
+                print("forward end")
 
-                if batch_id == 0:
-                    for param in fluid.default_main_program().global_block(
-                    ).all_parameters():
-                        dy_param_init_value[param.name] = param._numpy()
+                # if batch_id == 0:
+                #     for param in fluid.default_main_program().global_block(
+                #     ).all_parameters():
+                #         dy_param_init_value[param.name] = param._numpy()
 
                 avg_loss._backward()
+                print("backward end")
+
                 sgd.minimize(avg_loss)
+                print("sgd end")
+
                 mnist.clear_gradients()
-                dy_param_value = {}
-                for param in fluid.default_main_program().global_block(
-                ).all_parameters():
-                    dy_param_value[param.name] = param._numpy()
-
-        with new_program_scope():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
-
-            exe = fluid.Executor(fluid.CPUPlace(
-            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
-
-            mnist = MNIST()
-            sgd = SGDOptimizer(learning_rate=1e-3)
-            train_reader = paddle.batch(
-                paddle.dataset.mnist.train(), batch_size=128)
-
-            img = fluid.layers.data(
-                name='pixel', shape=[1, 28, 28], dtype='float32')
-            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-            cost = mnist(img)
-            loss = fluid.layers.cross_entropy(cost, label)
-            avg_loss = fluid.layers.mean(loss)
-            sgd.minimize(avg_loss)
-
-            # initialize params and fetch them
-            static_param_init_value = {}
-            static_param_name_list = []
-            for param in fluid.default_startup_program().global_block(
-            ).all_parameters():
-                static_param_name_list.append(param.name)
-
-            out = exe.run(fluid.default_startup_program(),
-                          fetch_list=static_param_name_list)
-
-            for i in range(len(static_param_name_list)):
-                static_param_init_value[static_param_name_list[i]] = out[i]
-
-            for batch_id, data in enumerate(train_reader()):
-                if batch_id >= batch_num:
-                    break
-
-                static_x_data = np.array(
-                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
-                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
-                    [128, 1])
-
-                fetch_list = [avg_loss.name]
-                fetch_list.extend(static_param_name_list)
-                out = exe.run(fluid.default_main_program(),
-                              feed={"pixel": static_x_data,
-                                    "label": y_data},
-                              fetch_list=fetch_list)
-
-                static_param_value = {}
-                static_out = out[0]
-                for i in range(1, len(out)):
-                    static_param_value[static_param_name_list[i - 1]] = out[i]
-
-        for key, value in six.iteritems(static_param_init_value):
-            self.assertTrue(np.allclose(value, dy_param_init_value[key]))
-
-        self.assertTrue(np.allclose(static_out, dy_out))
-
-        for key, value in six.iteritems(static_param_value):
-            self.assertTrue(np.allclose(value, dy_param_value[key]))
+
+                import gc
+                for name, var in fluid.default_main_program().global_block().vars.items():
+                    if not var.persistable:
+                        fluid.default_main_program().global_block()._remove_var(name)
+                        # var._ivar._clear_values()
+                for op in fluid.default_main_program().global_block().ops:
+                    fluid.default_main_program().global_block()._remove_op(op.idx)
+                assert len(gc.get_referrers(avg_loss)) == 1
+                print("clear end")
+                print("ivar ref ", gc.get_referrers(gc.get_referrers(avg_loss._ivar)[0])[0].__class__.__name__)
+                print("ivar ref ", gc.get_referrers(gc.get_referrers(avg_loss._ivar)[1])[0].__class__.__name__)
+
+                # dy_param_value = {}
+                # for param in fluid.default_main_program().global_block(
+                # ).all_parameters():
+                #     dy_param_value[param.name] = param._numpy()
+
+        # with new_program_scope():
+        #     fluid.default_startup_program().random_seed = seed
+        #     fluid.default_main_program().random_seed = seed
+
+        #     exe = fluid.Executor(fluid.CPUPlace(
+        #     ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+
+        #     mnist = MNIST()
+        #     sgd = SGDOptimizer(learning_rate=1e-3)
+        #     train_reader = paddle.batch(
+        #         paddle.dataset.mnist.train(), batch_size=128)
+
+        #     img = fluid.layers.data(
+        #         name='pixel', shape=[1, 28, 28], dtype='float32')
+        #     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        #     cost = mnist(img)
+        #     loss = fluid.layers.cross_entropy(cost, label)
+        #     avg_loss = fluid.layers.mean(loss)
+        #     sgd.minimize(avg_loss)
+
+        #     # initialize params and fetch them
+        #     static_param_init_value = {}
+        #     static_param_name_list = []
+        #     for param in fluid.default_startup_program().global_block(
+        #     ).all_parameters():
+        #         static_param_name_list.append(param.name)
+
+        #     out = exe.run(fluid.default_startup_program(),
+        #                   fetch_list=static_param_name_list)
+
+        #     for i in range(len(static_param_name_list)):
+        #         static_param_init_value[static_param_name_list[i]] = out[i]
+
+        #     for batch_id, data in enumerate(train_reader()):
+        #         if batch_id >= batch_num:
+        #             break
+
+        #         static_x_data = np.array(
+        #             [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
+        #         y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+        #             [128, 1])
+
+        #         fetch_list = [avg_loss.name]
+        #         fetch_list.extend(static_param_name_list)
+        #         out = exe.run(fluid.default_main_program(),
+        #                       feed={"pixel": static_x_data,
+        #                             "label": y_data},
+        #                       fetch_list=fetch_list)
+
+        #         static_param_value = {}
+        #         static_out = out[0]
+        #         for i in range(1, len(out)):
+        #             static_param_value[static_param_name_list[i - 1]] = out[i]
+
+        # for key, value in six.iteritems(static_param_init_value):
+        #     self.assertTrue(np.allclose(value, dy_param_init_value[key]))
+
+        # self.assertTrue(np.allclose(static_out, dy_out))
+
+        # for key, value in six.iteritems(static_param_value):
+        #     self.assertTrue(np.allclose(value, dy_param_value[key]))
 
 
 if __name__ == '__main__':
......
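The reworked test leans on Python's gc module to find out what is still keeping avg_loss and its _ivar alive after the block's vars and ops have been removed. A minimal, standalone sketch of that referrer-inspection technique on plain Python objects (all names here are made up for illustration; it is not the PaddlePaddle API):

    import gc


    class Holder(object):
        def __init__(self, payload):
            self.payload = payload


    payload = set()            # any gc-tracked object
    holder = Holder(payload)

    # Ask the collector which objects still reference `payload`.  The class
    # names of the referrers (an instance __dict__, a list, module globals, ...)
    # hint at who is keeping it alive, which is the same trick the test applies
    # to avg_loss._ivar above.
    for referrer in gc.get_referrers(payload):
        print(type(referrer).__name__)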