Unverified commit 5c162fe6 authored by littletomatodonkey, committed by GitHub

fix reg api ut fail (#29921)

Parent a4b9daf9
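
The unit-test failure came from setUp() pulling in paddle.dataset.imdb, which downloads and parses the IMDB corpus before any test runs, so a flaky download failed the whole suite. The commit (applied to two regularizer test files below) replaces that with two tiny synthetic batches and a fixed vocabulary size, self.word_len = 1500, which stands in for len(self.word_dict) wherever the model's dict_dim is needed, and it enables static-graph mode explicitly at the start of each test. A minimal standalone sketch of the new data layout (illustrative only, not part of the commit):

    import random

    # Same construction as the patched setUp(): two batches, each holding one
    # sample of ten random word ids (fed as a lod_level=1 sequence) and label [0].
    word_len = 1500  # passed to the model as dict_dim instead of len(word_dict)
    train_data = [[(random.sample(range(1000), 10), [0])] for _ in range(2)]

    for batch in train_data:
        words, label = batch[0]
        assert len(words) == 10 and max(words) < 1000 < word_len
        assert label == [0]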
@@ -18,6 +18,7 @@ import unittest
 from functools import partial
 import contextlib
 import numpy as np
+import random
 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
@@ -29,6 +30,7 @@ from paddle.fluid.backward import append_backward

 class TestL2DecayRegularizer(unittest.TestCase):
     def test_l2decay_regularizer(self):
+        paddle.enable_static()
         program = framework.Program()
         block = program.global_block()
         mul_x = block.create_parameter(
@@ -66,6 +68,7 @@ class TestL2DecayRegularizer(unittest.TestCase):

 class TestL1DecayRegularizer(unittest.TestCase):
     def test_l2decay_regularizer(self):
+        paddle.enable_static()
         program = framework.Program()
         block = program.global_block()
         mul_x = block.create_parameter(
@@ -124,16 +127,14 @@ def bow_net(data,
     prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
     cost = fluid.layers.cross_entropy(input=prediction, label=label)
     avg_cost = fluid.layers.mean(x=cost)
     return avg_cost


 class TestRegularizer(unittest.TestCase):
     def setUp(self):
-        self.word_dict = paddle.dataset.imdb.word_dict()
-        reader = paddle.batch(
-            paddle.dataset.imdb.train(self.word_dict), batch_size=1)()
-        self.train_data = [next(reader) for _ in range(1)]
+        self.word_len = 1500
+        self.train_data = [[(random.sample(range(1000), 10), [0])]
+                           for _ in range(2)]

     def get_places(self):
         places = [core.CPUPlace()]
@@ -179,7 +180,7 @@ class TestRegularizer(unittest.TestCase):
                 name="words", shape=[1], dtype="int64", lod_level=1)
             label = fluid.layers.data(name="label", shape=[1], dtype="int64")

-            avg_cost = model(data, label, len(self.word_dict))
+            avg_cost = model(data, label, self.word_len)

             optimizer = fluid.optimizer.Adagrad(
                 learning_rate=0.1,
@@ -200,7 +201,7 @@ class TestRegularizer(unittest.TestCase):
                 name="words", shape=[1], dtype="int64", lod_level=1)
             label = fluid.layers.data(name="label", shape=[1], dtype="int64")

-            avg_cost_l2 = model(data, label, len(self.word_dict))
+            avg_cost_l2 = model(data, label, self.word_len)

             param_list = fluid.default_main_program().block(0).all_parameters()
             para_sum = []
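The second test file below receives the same treatment. Both files also gain paddle.enable_static() calls: since Paddle 2.0 the framework starts in dynamic (imperative) mode, so tests built on the fluid Program/block and fluid.layers APIs must switch to static-graph mode first. A minimal sketch of that pattern under the same-era API (the x/out placeholders are illustrative, not from the tests):

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()  # fluid.Program / fluid.layers require static mode

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # Placeholder inputs, analogous to the "words"/"label" layers in the tests.
        x = fluid.data(name='x', shape=[None, 4], dtype='float32')
        out = fluid.layers.fc(input=x, size=2)
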
@@ -18,6 +18,7 @@ import unittest
 from functools import partial
 import contextlib
 import numpy as np
+import random
 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
@@ -55,10 +56,9 @@ def bow_net(data,

 class TestRegularizer(unittest.TestCase):
     def setUp(self):
-        self.word_dict = paddle.dataset.imdb.word_dict()
-        reader = paddle.batch(
-            paddle.dataset.imdb.train(self.word_dict), batch_size=1)()
-        self.train_data = [next(reader) for _ in range(1)]
+        self.word_len = 1500
+        self.train_data = [[(random.sample(range(1000), 10), [0])]
+                           for _ in range(2)]

     def get_places(self):
         places = [core.CPUPlace()]
@@ -104,7 +104,7 @@ class TestRegularizer(unittest.TestCase):
                 name="words", shape=[1], dtype="int64", lod_level=1)
             label = fluid.layers.data(name="label", shape=[1], dtype="int64")

-            avg_cost = model(data, label, len(self.word_dict))
+            avg_cost = model(data, label, self.word_len)

             optimizer = fluid.optimizer.Adagrad(
                 learning_rate=0.1,
@@ -125,7 +125,7 @@ class TestRegularizer(unittest.TestCase):
                 name="words", shape=[1], dtype="int64", lod_level=1)
             label = fluid.layers.data(name="label", shape=[1], dtype="int64")

-            avg_cost_l2 = model(data, label, len(self.word_dict))
+            avg_cost_l2 = model(data, label, self.word_len)

             param_list = fluid.default_main_program().block(0).all_parameters()
             para_sum = []
@@ -140,6 +140,7 @@ class TestRegularizer(unittest.TestCase):
         return param_sum

     def test_l2(self):
+        paddle.enable_static()
         for place in self.get_places():
             dense_sparse_p_sum = []
             for sparse in [True, False]:
@@ -159,6 +160,7 @@ class TestRegularizer(unittest.TestCase):
                 rtol=5e-5)

     def test_repeated_regularization(self):
+        paddle.enable_static()
         l1 = paddle.regularizer.L1Decay(0.1)
         l2 = paddle.regularizer.L2Decay(0.01)
         fc_param_attr = fluid.ParamAttr(regularizer=l1)
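The last hunk touches test_repeated_regularization, which mixes the public paddle.regularizer classes with a per-parameter regularizer attached through fluid.ParamAttr. A hedged sketch of that usage pattern (the x/out/loss names and the SGD optimizer are illustrative, not taken from the test):

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    l1 = paddle.regularizer.L1Decay(0.1)
    l2 = paddle.regularizer.L2Decay(0.01)

    # Per-parameter regularizer, attached via ParamAttr as in the test.
    fc_param_attr = fluid.ParamAttr(regularizer=l1)

    x = fluid.data(name='x', shape=[None, 8], dtype='float32')
    out = fluid.layers.fc(input=x, size=4, param_attr=fc_param_attr)
    loss = fluid.layers.reduce_mean(out)

    # Optimizer-level regularization applies to parameters that do not already
    # carry their own regularizer; a ParamAttr regularizer wins for its parameter.
    sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
    sgd.minimize(loss)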