提交 2c6eb21d 编写于 作者:lelelelelez

fix function-redefined

上级 30416052
......@@ -391,16 +391,6 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
for _ in range(self._outstanding_capacity):
self._try_put_indices()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _shutdown_worker(self, worker_id, shutdown=False):
if self._worker_status[worker_id] or (self._persistent_workers and
shutdown):
......
......@@ -219,7 +219,7 @@ class NetWithDictPop(paddle.nn.Layer):
return x
class TestDictPop(TestNetWithDict):
class TestDictPop3(TestNetWithDict):
def setUp(self):
self.x = np.array([2, 2]).astype('float32')
......
......@@ -117,9 +117,6 @@ class TestMKLDNNSwishDim2(TestSwish):
def init_dtype(self):
self.dtype = np.float32
def init_dtype(self):
self.dtype = np.float32
class TestMKLDNNHardSwishDim2(TestHardSwish):
def setUp(self):
......
......@@ -122,9 +122,6 @@ class TestAsymPad(TestPool2D_Op):
def init_kernel_type(self):
self.use_mkldnn = True
def init_global_pool(self):
self.global_pool = False
def init_data_type(self):
self.dtype = np.float32
......
......@@ -79,7 +79,7 @@ class TestSequenceUnpadOp4(TestSequenceUnpadOp):
self.dtype = "float64"
class TestSequenceUnpadOp4(TestSequenceUnpadOp):
class TestSequenceUnpadOp5(TestSequenceUnpadOp):
def init(self):
self.length = [0, 4, 3, 0]
self.x_shape = (4, 5, 3, 3, 6)
......
......@@ -436,7 +436,7 @@ class TestArgsortImperative3(TestArgsortImperative):
self.axis = 1
class TestArgsortImperative2(TestArgsortImperative):
class TestArgsortImperative4(TestArgsortImperative):
def init(self):
self.input_shape = [2, 3, 4]
self.axis = 1
......
......@@ -484,7 +484,7 @@ class TestBicubicOpError(unittest.TestCase):
align_corners=False,
scale_factor=[1, 2, 2])
def test_scale_value():
def test_scale_value_1():
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
out = interpolate(
x,
......
......@@ -46,7 +46,7 @@ class TestFlattenOp(OpTest):
self.attrs = {"axis": self.axis}
class TestFlattenOp(TestFlattenOp):
class TestFlattenOp_V1(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.axis = 0
......
......@@ -43,7 +43,7 @@ class TestFlattenOp(OpTest):
self.attrs = {"axis": self.axis}
class TestFlattenOp(TestFlattenOp):
class TestFlattenOp_V1(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 2, 10)
self.axis = 0
......
......@@ -49,7 +49,7 @@ class TestFleetBase(unittest.TestCase):
optimizer.minimize(avg_cost)
class TestFleetBase(unittest.TestCase):
class TestFleetBase1(unittest.TestCase):
def setUp(self):
os.environ["POD_IP"] = "127.0.0.1"
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001"
......
......@@ -209,7 +209,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
proc_b.start()
wait([proc_a, proc_b])
def test_graph_execution_optimizer(self):
def test_graph_execution_optimizer_v2(self):
port_a = self._dist_ut_port_0 + 6
port_b = self._dist_ut_port_1 + 6
node_a = {
......
......@@ -292,7 +292,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
])
class TestFleetMetaOptimizer(TestFleetMetaOptimizer):
class TestFleetMetaOptimizer_V1(TestFleetMetaOptimizer):
def setUp(self):
os.environ["PADDLE_TRAINER_ID"] = "3"
os.environ[
......
......@@ -375,18 +375,6 @@ class TestImperativeAutoPrune(unittest.TestCase):
self.assertTrue(case3.linear2.weight._grad_ivar() is None)
self.assertTrue(case3.linear.weight._grad_ivar() is not None)
def test_case2_prune_no_grad_branch(self):
with fluid.dygraph.guard():
value1 = np.arange(784).reshape(1, 784)
value2 = np.arange(1).reshape(1, 1)
v1 = fluid.dygraph.to_variable(value1).astype("float32")
v2 = fluid.dygraph.to_variable(value2).astype("float32")
case3 = AutoPruneLayer2(input_size=784)
loss = case3(v1, v2)
loss.backward()
self.assertTrue(case3.linear2.weight._grad_ivar() is None)
self.assertTrue(case3.linear.weight._grad_ivar() is not None)
def test_case3_prune_no_grad_branch2(self):
with fluid.dygraph.guard():
value1 = np.arange(1).reshape(1, 1)
......
......@@ -143,10 +143,10 @@ class TestLinspaceOpError(unittest.TestCase):
self.assertRaises(TypeError, test_start_type)
def test_end_dtype():
def test_end_type():
fluid.layers.linspace(0, [10], 1, dtype="float32")
self.assertRaises(TypeError, test_end_dtype)
self.assertRaises(TypeError, test_end_type)
def test_step_dtype():
fluid.layers.linspace(0, 10, [0], dtype="float32")
......
......@@ -27,7 +27,7 @@ class TestHybridPipeParallel(TestMultipleGpus):
def test_hybrid_parallel_pp_tuple_inputs(self):
self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py')
def test_hybrid_parallel_pp_tuple_inputs(self):
def test_hybrid_parallel_shared_weight(self):
self.run_mnist_2gpu('hybrid_parallel_shared_weight.py')
def test_pipeline_parallel(self):
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册