Commit 4c55502b authored by lelelelelez

fix function-redefined

Parent 6c09496a
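Context for the hunks below: `function-redefined` is the symbolic name of pylint check E0102, raised when a function, method, or class is defined twice in the same scope. Python binds a name at the moment its `def` or `class` statement executes, so the later definition silently replaces the earlier one and the shadowed test never runs. Each hunk fixes one occurrence, either by renaming the duplicate or by deleting a verbatim copy. A minimal sketch of the pitfall (illustrative only, not code from this repository):

```python
# A minimal sketch (not part of this patch) of the bug class being fixed:
# Python binds a ``def`` name when the statement executes, so a second
# definition with the same name silently replaces the first -- no error.
import unittest


class Demo(unittest.TestCase):
    def test_feature(self):
        self.assertEqual(1 + 1, 2)  # never runs: shadowed by the def below

    def test_feature(self):  # same name -> rebinds, hiding the first test
        self.assertEqual(2 * 2, 4)


if __name__ == '__main__':
    unittest.main()  # collects and runs 1 test, not 2
```

Renaming (e.g. `TestDictPop` to `TestDictPop3`) keeps both tests alive; deletion is used where the second definition was an exact duplicate.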
@@ -391,16 +391,6 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
         for _ in range(self._outstanding_capacity):
             self._try_put_indices()
 
-    def _clear_and_remove_data_queue(self):
-        if self._data_queue is not None:
-            while True:
-                try:
-                    self._data_queue.get_nowait()
-                except:
-                    self._data_queue.cancel_join_thread()
-                    self._data_queue.close()
-                    break
-
     def _shutdown_worker(self, worker_id, shutdown=False):
         if self._worker_status[worker_id] or (self._persistent_workers and
                                               shutdown):
...
@@ -219,7 +219,7 @@ class NetWithDictPop(paddle.nn.Layer):
         return x
 
-class TestDictPop(TestNetWithDict):
+class TestDictPop3(TestNetWithDict):
     def setUp(self):
         self.x = np.array([2, 2]).astype('float32')
...
@@ -117,9 +117,6 @@ class TestMKLDNNSwishDim2(TestSwish):
     def init_dtype(self):
        self.dtype = np.float32
 
-    def init_dtype(self):
-        self.dtype = np.float32
-
 class TestMKLDNNHardSwishDim2(TestHardSwish):
     def setUp(self):
...
@@ -122,9 +122,6 @@ class TestAsymPad(TestPool2D_Op):
     def init_kernel_type(self):
         self.use_mkldnn = True
 
-    def init_global_pool(self):
-        self.global_pool = False
-
     def init_data_type(self):
         self.dtype = np.float32
...
@@ -79,7 +79,7 @@ class TestSequenceUnpadOp4(TestSequenceUnpadOp):
         self.dtype = "float64"
 
-class TestSequenceUnpadOp4(TestSequenceUnpadOp):
+class TestSequenceUnpadOp5(TestSequenceUnpadOp):
     def init(self):
         self.length = [0, 4, 3, 0]
         self.x_shape = (4, 5, 3, 3, 6)
...
@@ -436,7 +436,7 @@ class TestArgsortImperative3(TestArgsortImperative):
         self.axis = 1
 
-class TestArgsortImperative2(TestArgsortImperative):
+class TestArgsortImperative4(TestArgsortImperative):
     def init(self):
         self.input_shape = [2, 3, 4]
         self.axis = 1
...
@@ -484,7 +484,7 @@ class TestBicubicOpError(unittest.TestCase):
                 align_corners=False,
                 scale_factor=[1, 2, 2])
 
-        def test_scale_value():
+        def test_scale_value_1():
             x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
             out = interpolate(
                 x,
@@ -535,7 +535,7 @@ class TestBicubicOpError(unittest.TestCase):
         self.assertRaises(ValueError, test_outshape_and_scale)
         self.assertRaises(ValueError, test_align_corners_and_nearest)
         self.assertRaises(ValueError, test_scale_shape)
-        self.assertRaises(ValueError, test_scale_value)
+        self.assertRaises(ValueError, test_scale_value_1)
         self.assertRaises(ValueError, test_size_and_scale)
         self.assertRaises(ValueError, test_size_and_scale2)
         self.assertRaises(TypeError, test_size_type)
...
@@ -46,7 +46,7 @@ class TestFlattenOp(OpTest):
         self.attrs = {"axis": self.axis}
 
-class TestFlattenOp(TestFlattenOp):
+class TestFlattenOp1(TestFlattenOp):
     def init_test_case(self):
         self.in_shape = (3, 2, 5, 4)
         self.axis = 0
...
@@ -43,7 +43,7 @@ class TestFlattenOp(OpTest):
         self.attrs = {"axis": self.axis}
 
-class TestFlattenOp(TestFlattenOp):
+class TestFlattenOp1(TestFlattenOp):
     def init_test_case(self):
         self.in_shape = (3, 2, 2, 10)
         self.axis = 0
...
@@ -209,7 +209,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
             proc_b.start()
             wait([proc_a, proc_b])
 
-    def test_graph_execution_optimizer(self):
+    def test_graph_execution_optimizer_v2(self):
         port_a = self._dist_ut_port_0 + 6
         port_b = self._dist_ut_port_1 + 6
         node_a = {
...
@@ -292,7 +292,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         ])
 
-class TestFleetMetaOptimizer(TestFleetMetaOptimizer):
+class TestFleetMetaOptimizer_V1(TestFleetMetaOptimizer):
     def setUp(self):
         os.environ["PADDLE_TRAINER_ID"] = "3"
         os.environ[
...
@@ -375,18 +375,6 @@ class TestImperativeAutoPrune(unittest.TestCase):
             self.assertTrue(case3.linear2.weight._grad_ivar() is None)
             self.assertTrue(case3.linear.weight._grad_ivar() is not None)
 
-    def test_case2_prune_no_grad_branch(self):
-        with fluid.dygraph.guard():
-            value1 = np.arange(784).reshape(1, 784)
-            value2 = np.arange(1).reshape(1, 1)
-            v1 = fluid.dygraph.to_variable(value1).astype("float32")
-            v2 = fluid.dygraph.to_variable(value2).astype("float32")
-            case3 = AutoPruneLayer2(input_size=784)
-            loss = case3(v1, v2)
-            loss.backward()
-            self.assertTrue(case3.linear2.weight._grad_ivar() is None)
-            self.assertTrue(case3.linear.weight._grad_ivar() is not None)
-
     def test_case3_prune_no_grad_branch2(self):
         with fluid.dygraph.guard():
             value1 = np.arange(1).reshape(1, 1)
...
@@ -143,10 +143,10 @@ class TestLinspaceOpError(unittest.TestCase):
         self.assertRaises(TypeError, test_start_type)
 
-        def test_end_dtype():
+        def test_end_type():
             fluid.layers.linspace(0, [10], 1, dtype="float32")
 
-        self.assertRaises(TypeError, test_end_dtype)
+        self.assertRaises(TypeError, test_end_type)
 
         def test_step_dtype():
             fluid.layers.linspace(0, 10, [0], dtype="float32")
...
@@ -27,7 +27,7 @@ class TestHybridPipeParallel(TestMultipleGpus):
     def test_hybrid_parallel_pp_tuple_inputs(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py')
 
-    def test_hybrid_parallel_pp_tuple_inputs(self):
+    def test_hybrid_parallel_shared_weight(self):
         self.run_mnist_2gpu('hybrid_parallel_shared_weight.py')
 
     def test_pipeline_parallel(self):
...
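Duplicates like these are easy to reintroduce by hand; besides enforcing pylint's `function-redefined` (E0102) check in CI, they can be flagged with a few lines of standard-library code. A minimal sketch, not part of this commit; `find_redefinitions` is a hypothetical helper name:

```python
# Hypothetical checker (not from this repository): report class members
# that are defined more than once, using only the standard library.
import ast
import sys
from collections import Counter


def find_redefinitions(source):
    """Yield (class_name, member_name) for members defined more than once."""
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.ClassDef):
            counts = Counter(
                child.name for child in node.body
                if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef)))
            for name, count in counts.items():
                if count > 1:
                    yield node.name, name


if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path, encoding='utf-8') as f:
            for cls, member in find_redefinitions(f.read()):
                print('{}: {}.{} defined more than once'.format(
                    path, cls, member))
```

This sketch only covers duplicate methods inside a class body; the module-level duplicates fixed above (e.g. the two `TestFlattenOp` definitions) would need the same counting applied to `ast.Module` bodies as well.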