Commit 6bb84490 authored by minqiyang

Fix imperative unit test

test=develop
Parent 336160e6
@@ -61,6 +61,9 @@ class Autograd {
       for (size_t i = 0; i < input_grads.size(); ++i) {
         if (!input_grads[i]) continue;
+        if (ready_op->input_vars_->at(i)->stop_gradient_) {
+          continue;
+        }
         OpBase* pre_op = ready_op->pre_ops_->at(i);
         if (!pre_op) continue;
@@ -152,7 +155,7 @@ void VarBase::ApplyGrad(framework::Scope* scope, Variable* grad) {
 }
 
 std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
-  VLOG(3) << "op grad " << grad_op_desc_->Type();
+  VLOG(3) << "op grad type: " << grad_op_desc_->Type();
 
   for (const std::string& grad_invar : grad_op_desc_->InputArgumentNames()) {
     if (grad_to_var_->find(grad_invar) == grad_to_var_->end()) {
......
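Note on the hunks above: the added stop_gradient_ check prunes the backward walk, so an op whose output only feeds frozen variables is never scheduled. A minimal Python sketch of the same idea, with hypothetical names (this is not the actual Paddle API):

    # Worklist backward pass with stop_gradient pruning, mirroring Autograd above.
    def run_backward(root_op):
        ready = [root_op]
        while ready:
            op = ready.pop()
            input_grads = op.apply_grad()  # gradients w.r.t. each input slot
            for i, grad in enumerate(input_grads):
                if grad is None:
                    continue
                if op.input_vars[i].stop_gradient:  # the added check: prune this branch
                    continue
                pre_op = op.pre_ops[i]
                if pre_op is not None:
                    ready.append(pre_op)  # keep walking toward the leaves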
@@ -93,6 +93,8 @@ class Tracer {
           LOG(ERROR) << "tracer doesn't support yet";
         }
       }
+      outputs[i]->stop_gradient_ = stop_gradient;
       outputs[i]->var_ = var;
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
@@ -106,6 +108,7 @@ class Tracer {
       CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
       op->grad_op_desc_ = grad_op_desc;
       op->grad_to_var_ = grad_to_var;
+      VLOG(3) << "tracer create grad op " << grad_op_desc->Type();
     }
     op->block_ = block;
   }
......
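Note on the Tracer hunks: each traced output now carries the stop_gradient flag that the Autograd check above consumes, alongside the existing producer-op bookkeeping, and grad-op creation is logged at VLOG(3). A rough sketch of the per-output bookkeeping, again with hypothetical names:

    # Taping step: every forward output remembers which op produced it, which
    # output slot it came from, and whether the backward pass should stop here.
    def record_outputs(op, outputs, stop_gradient=False):
        for i, out in enumerate(outputs):
            out.stop_gradient = stop_gradient  # read later during backward
            out.pre_op = op
            out.pre_op_out_idx = i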
@@ -9666,14 +9666,15 @@ class FC(layers.PyLayer):
                  param_attr=None,
                  num_flatten_dims=1,
                  dtype=core.VarDesc.VarType.FP32):
-        super(FC, self).__init__()
+        super(FC, self).__init__(param_attr=param_attr)
         self._size = size
         self._num_flatten_dims = num_flatten_dims
         self._dtype = dtype
-        self._helper = LayerHelper('FC', param_attr=param_attr)
+        self._tmp = self._helper.create_variable_for_type_inference(self._dtype)
+        self._out = self._helper.create_variable_for_type_inference(self._dtype)
 
     def _build_once(self, inputs):
-        input_shape = inputs[0].shape
+        input_shape = inputs.shape
         param_shape = [
             reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1)
         ] + [self._size]
@@ -9684,21 +9685,20 @@ class FC(layers.PyLayer):
             is_bias=False)
 
     def forward(self, inputs):
-        tmp = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type="mul",
-            inputs={"X": inputs[0],
+            inputs={"X": inputs,
                     "Y": self._w},
-            outputs={"Out": tmp},
+            outputs={"Out": self._tmp},
             attrs={
                 "x_num_col_dims": self._num_flatten_dims,
                 "y_num_col_dims": 1
             })
 
-        out = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type="sum",
-            inputs={"X": [tmp]},
-            outputs={"Out": out},
+            inputs={"X": [self._tmp]},
+            outputs={"Out": self._out},
             attrs={"use_mkldnn": False})
-        return out
+        return self._out
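With this change FC.forward consumes a single Variable instead of a one-element list, and writes into the self._tmp / self._out variables created once in __init__. A hedged usage sketch following the updated tests further down (it assumes size is FC's first positional argument):

    import numpy as np
    import paddle.fluid as fluid

    np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    with fluid.imperative.guard():
        var_inp = fluid.imperative.base.to_variable(np_inp)  # wrap the ndarray first
        fc = FC(3)           # mul against self._w, then sum, as appended above
        out = fc(var_inp)    # pass the Variable itself, no longer [np_inp]
        print(out._numpy())  # eager result of the traced ops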
@@ -36,7 +36,7 @@ class MyLayer(fluid.imperative.PyLayer):
         super(MyLayer, self).__init__()
 
     def forward(self, inputs):
-        x = fluid.layers.relu(inputs[0])
+        x = fluid.layers.relu(inputs)
         self._x_for_debug = x
         return [fluid.layers.elementwise_mul(x, x)]
@@ -52,7 +52,7 @@ class MLP(fluid.imperative.PyLayer):
                 initializer=fluid.initializer.Constant(value=0.1)))
 
     def forward(self, inputs):
-        x = self._fc1(inputs[0])
+        x = self._fc1(inputs)
         x = self._fc2(x)
         x = fluid.layers.reduce_sum(x)
         return x
@@ -64,13 +64,14 @@ class TestImperative(unittest.TestCase):
             cl = core.Layer()
             cl.forward([])
             l = fluid.imperative.PyLayer()
-            l.forward([])
+            self.assertRaises(NotImplementedError, l.forward, [])
 
     def test_layer_in_out(self):
         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
             l = MyLayer()
-            x = l(np_inp)[0]
+            x = l(var_inp)[0]
             self.assertIsNotNone(x)
             dy_out = x._numpy()
             x._backward()
@@ -95,8 +96,9 @@ class TestImperative(unittest.TestCase):
     def test_mlp(self):
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
         with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
             mlp = MLP()
-            out = mlp(np_inp)
+            out = mlp(var_inp)
             dy_out = out._numpy()
             out._backward()
             dy_grad = mlp._fc1._w._gradient()
......
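The test updates above all follow one pattern: wrap the numpy input with to_variable before calling the layer, since forward now expects a Variable rather than a list of arrays. For test_layer_in_out the expected forward values can be worked by hand:

    import numpy as np

    np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
    relu_x = np.maximum(np_inp, 0.0)  # relu -> [1.0, 2.0, 0.0]
    expected = relu_x * relu_x        # elementwise_mul(x, x) -> [1.0, 4.0, 0.0]
    # dy_out fetched via x._numpy() in the test should match `expected`.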
@@ -101,7 +101,7 @@ class TestImperativeMnist(unittest.TestCase):
         mnist = MNIST()
         sgd = SGDOptimizer(learning_rate=1e-3)
-        for i in range(1):
+        for i in range(2):
             x_data = np.random.rand(128, 1, 28, 28).astype('float32')
             img = to_variable(x_data)
             y_data = np.random.rand(128, 1).astype('int64')
......