Commit d3fc3d55 authored by lujun

move internal function, test=develop

Parent 2b32302b
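For context, this commit switches the dygraph tests from the internal `_backward()` / `_numpy()` methods to the public `backward()` / `numpy()` calls. Below is a minimal sketch of the renamed API, based only on calls that appear in the diff; the `fluid.dygraph.guard()` context is an assumption about the surrounding test setup and does not itself appear in these hunks.

```python
import numpy as np
import paddle.fluid as fluid

# Minimal sketch of the renamed dygraph calls (assumes fluid.dygraph.guard()
# is available in the Paddle build these tests target).
with fluid.dygraph.guard():
    x = np.ones([2, 2], dtype=np.float32)
    var = fluid.dygraph.base.to_variable(x)
    loss = fluid.layers.reduce_sum(var)

    loss.backward()            # previously loss._backward()
    print(loss.numpy())        # previously loss._numpy()
    print(var.gradient())      # gradient of the input w.r.t. the loss
```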
@@ -200,7 +200,7 @@ class TestImperative(unittest.TestCase):
inputs.append(fluid.dygraph.base.to_variable(x))
ret = fluid.layers.sums(inputs)
loss = fluid.layers.reduce_sum(ret)
loss._backward()
loss.backward()
self.assertTrue(np.allclose(ret.numpy(), x * 10))
self.assertTrue(np.allclose(inputs[0].gradient(), x))
@@ -258,7 +258,7 @@ class TestImperative(unittest.TestCase):
var_inp = fluid.dygraph.base.to_variable(np_inp)
outs = my_py_layer(var_inp)
dy_out = np.sum(outs[0].numpy())
outs[0]._backward()
outs[0].backward()
dy_grad = var_inp.gradient()
with new_program_scope():
@@ -288,7 +288,7 @@ class TestImperative(unittest.TestCase):
x = l(var_inp)[0]
self.assertIsNotNone(x)
dy_out = x.numpy()
x._backward()
x.backward()
dy_grad = l._x_for_debug.gradient()
with new_program_scope():
@@ -315,7 +315,7 @@ class TestImperative(unittest.TestCase):
mlp = MLP("mlp")
out = mlp(var_inp)
dy_out = out.numpy()
out._backward()
out.backward()
dy_grad = mlp._fc1._w.gradient()
with new_program_scope():
@@ -359,7 +359,7 @@ class TestImperative(unittest.TestCase):
simple_rnn = SimpleRNN("simple_rnn")
outs, pre_hiddens = simple_rnn.forward(var_inp)
dy_out = outs[3].numpy()
outs[3]._backward()
outs[3].backward()
dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()
dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()
dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()
......
@@ -252,7 +252,7 @@ class TestDygraphDeepCF(unittest.TestCase):
fluid.layers.log_loss(prediction,
to_variable(labels_np[
slice:slice + BATCH_SIZE])))
loss._backward()
loss.backward()
adam.minimize(loss)
deepcf.clear_gradients()
dy_loss = loss.numpy()
......
@@ -150,7 +150,7 @@ class TestDygraphGAN(unittest.TestCase):
x=d_fake, label=to_variable(np.zeros([2, 1], np.float32))))
d_loss = d_loss_real + d_loss_fake
d_loss._backward()
d_loss.backward()
sgd.minimize(d_loss)
discriminator.clear_gradients()
generator.clear_gradients()
@@ -160,7 +160,7 @@ class TestDygraphGAN(unittest.TestCase):
g_loss = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake, label=to_variable(np.ones([2, 1], np.float32))))
g_loss._backward()
g_loss.backward()
sgd.minimize(g_loss)
for p in discriminator.parameters():
dy_params[p.name] = p.numpy()
......
@@ -134,11 +134,11 @@ class TestImperativeMnist(unittest.TestCase):
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
dy_out = avg_loss._numpy()
dy_out = avg_loss.numpy()
if epoch == 0 and batch_id == 0:
for param in mnist.parameters():
dy_param_init_value[param.name] = param._numpy()
dy_param_init_value[param.name] = param.numpy()
avg_loss._backward()
sgd.minimize(avg_loss)
@@ -146,7 +146,7 @@ class TestImperativeMnist(unittest.TestCase):
dy_param_value = {}
for param in mnist.parameters():
dy_param_value[param.name] = param._numpy()
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
......
@@ -75,18 +75,18 @@ class TestImperativeOptimizerBase(unittest.TestCase):
cost = mlp(img)
avg_loss = fluid.layers.reduce_mean(cost)
dy_out = avg_loss._numpy()
dy_out = avg_loss.numpy()
if batch_id == 0:
for param in mlp.parameters():
dy_param_init_value[param.name] = param._numpy()
dy_param_init_value[param.name] = param.numpy()
avg_loss._backward()
optimizer.minimize(avg_loss)
mlp.clear_gradients()
dy_param_value = {}
for param in mlp.parameters():
dy_param_value[param.name] = param._numpy()
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
......
@@ -261,7 +261,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
if i == 0:
for param in ptb_model.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss._backward()
dy_loss.backward()
sgd.minimize(dy_loss)
ptb_model.clear_gradients()
if i == batch_num - 1:
......
@@ -273,7 +273,7 @@ class TestDygraphResnet(unittest.TestCase):
if param.name not in dy_param_init_value:
dy_param_init_value[param.name] = param.numpy()
avg_loss._backward()
avg_loss.backward()
dy_grad_value = {}
for param in resnet.parameters():
......
@@ -331,7 +331,7 @@ class TestImperativeResneXt(unittest.TestCase):
dy_param_init_value = {}
for param in se_resnext.parameters():
dy_param_init_value[param.name] = param._numpy()
dy_param_init_value[param.name] = param.numpy()
for batch_id, data in enumerate(train_reader()):
if batch_id >= batch_num:
@@ -350,12 +350,12 @@ class TestImperativeResneXt(unittest.TestCase):
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = fluid.layers.mean(x=loss)
dy_out = avg_loss._numpy()
dy_out = avg_loss.numpy()
if batch_id == 0:
for param in se_resnext.parameters():
if param.name not in dy_param_init_value:
dy_param_init_value[param.name] = param._numpy()
dy_param_init_value[param.name] = param.numpy()
avg_loss._backward()
@@ -372,7 +372,7 @@ class TestImperativeResneXt(unittest.TestCase):
dy_param_value = {}
for param in se_resnext.parameters():
dy_param_value[param.name] = param._numpy()
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
......
@@ -995,7 +995,7 @@ class TestDygraphTransformer(unittest.TestCase):
for param in transformer.parameters():
dy_param_init[param.name] = param.numpy()
dy_avg_cost._backward()
dy_avg_cost.backward()
optimizer.minimize(dy_avg_cost)
transformer.clear_gradients()
if i == batch_num - 1:
......
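All of the touched tests share the same dygraph train-step shape that the renamed calls slot into: forward pass, `backward()`, `minimize()`, `clear_gradients()`. A self-contained sketch of that step follows; the `FC` layer and the `fluid.optimizer.SGD` alias are stand-ins chosen for brevity and are assumptions, not taken from any single test in this diff.

```python
import numpy as np
import paddle.fluid as fluid

# Hedged sketch of the per-batch step the tests above repeat; the real tests
# use MNIST/ResNet/Transformer models instead of this toy FC layer.
with fluid.dygraph.guard():
    fc = fluid.dygraph.FC("fc", 1)
    sgd = fluid.optimizer.SGD(learning_rate=0.01)

    x = fluid.dygraph.base.to_variable(
        np.random.rand(4, 3).astype('float32'))
    avg_loss = fluid.layers.reduce_mean(fc(x))

    avg_loss.backward()        # was avg_loss._backward()
    sgd.minimize(avg_loss)     # apply the accumulated gradients
    fc.clear_gradients()       # reset gradients before the next batch
    print(avg_loss.numpy())    # was avg_loss._numpy()
```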