Unverified commit 3125733a, authored by Weilong Wu, committed by GitHub

rm _disable_legacy_dygraph and disable one mkldnn test file (#48721)

* rm _disable_legacy_dygraph

* disable test_flags_mkldnn_ops_on_off test
Parent d050c188
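For context before the diffs: eager dygraph is now Paddle's default execution mode, so the `_enable_legacy_dygraph()`/`_disable_legacy_dygraph()` toggles and the `_test_eager_guard()` context manager in these tests are dead code. The recurring edit below is therefore mechanical: delete the toggle or the `with` line and dedent the guarded body. A minimal sketch of the resulting pattern, on an illustrative test (the class name and assertions here are hypothetical, not taken from the diff):

import unittest

import numpy as np
import paddle


class EagerDefaultTestCase(unittest.TestCase):
    def test_scale_base(self):
        # Previously this body was wrapped in `with _test_eager_guard():`;
        # with eager mode the default, the guard is gone and the body is
        # dedented one level.
        paddle.set_device("cpu")
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        tensor = paddle.to_tensor(arr, 'float32', paddle.CPUPlace())
        tensor = paddle.scale(tensor, scale=2.0)
        np.testing.assert_array_equal(tensor.numpy(), arr * 2.0)


if __name__ == "__main__":
    unittest.main()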
@@ -5,6 +5,7 @@ file(
 string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
 list(REMOVE_ITEM TEST_OPS "test_onnx_format_quantization_mobilenetv1")
+list(REMOVE_ITEM TEST_OPS "test_flags_mkldnn_ops_on_off")
 if(WITH_MKLDNN AND NOT WIN32)
   list(APPEND TEST_OPS "test_onnx_format_quantization_mobilenetv1")
@@ -19,4 +20,4 @@ if(WITH_MKLDNN AND NOT WIN32)
   set_tests_properties(test_onnx_format_quantization_mobilenetv1
                        PROPERTIES TIMEOUT 300)
 endif()
-set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
+# set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
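Note that the CMake change disables the test in both places it is referenced: `list(REMOVE_ITEM TEST_OPS ...)` drops `test_flags_mkldnn_ops_on_off` from the generated test list, and its `set_tests_properties(... TIMEOUT 120)` line is commented out rather than deleted, presumably so the timeout is easy to restore when the test is re-enabled.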
@@ -18,11 +18,9 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.framework import _enable_legacy_dygraph, _global_flags
+from paddle.fluid.framework import _global_flags
 from paddle.fluid.layer_helper import LayerHelper

-_enable_legacy_dygraph()
-

 def check():
     print(
...
@@ -22,7 +22,6 @@ from decorator_helper import prog_scope
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.framework as framework
 import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
 from paddle.fluid.backward import append_backward
@@ -42,7 +41,6 @@ class TestAssignOp(op_test.OpTest):
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()

     def test_backward(self):
         paddle.enable_static()
@@ -50,7 +48,6 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()


 class TestAssignFP16Op(op_test.OpTest):
@@ -67,7 +64,6 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_output(check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()

     def test_backward(self):
         paddle.enable_static()
@@ -75,7 +71,6 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_grad(['X'], 'Out', check_eager=True)
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.disable_static()
-        framework._disable_legacy_dygraph()


 class TestAssignOpWithLoDTensorArray(unittest.TestCase):
...
@@ -22,15 +22,12 @@ import paddle.fluid.core as core
 from paddle.fluid.framework import (
     EagerParamBase,
     _current_expected_place,
-    _disable_legacy_dygraph,
-    _test_eager_guard,
     in_dygraph_mode,
 )


 class EagerScaleTestCase(unittest.TestCase):
     def test_scale_base(self):
-        with _test_eager_guard():
         paddle.set_device("cpu")
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
@@ -43,7 +40,6 @@ class EagerScaleTestCase(unittest.TestCase):
         self.assertEqual(tensor.stop_gradient, True)

     def test_retain_grad_and_run_backward(self):
-        with _test_eager_guard():
         paddle.set_device("cpu")
         input_data = np.ones([4, 16, 16, 32]).astype('float32')
@@ -63,7 +59,6 @@ class EagerScaleTestCase(unittest.TestCase):
         np.testing.assert_array_equal(data_eager.grad.numpy(), input_data)

     def test_retain_grad_and_run_backward_raises(self):
-        with _test_eager_guard():
         paddle.set_device("cpu")
         input_data = np.ones([4, 16, 16, 32]).astype('float32')
@@ -74,9 +69,7 @@ class EagerScaleTestCase(unittest.TestCase):
         grad_data = np.ones([4, 16, 16, 32]).astype('float32')
         grad_data2 = np.ones([4, 16]).astype('float32')
         grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
-        grad_eager2 = paddle.to_tensor(
-            grad_data2, 'float32', core.CPUPlace()
-        )
+        grad_eager2 = paddle.to_tensor(grad_data2, 'float32', core.CPUPlace())

         data_eager.retain_grads()
@@ -96,7 +89,6 @@ class EagerScaleTestCase(unittest.TestCase):
 class EagerDtypeTestCase(unittest.TestCase):
     def check_to_tesnsor_and_numpy(self, dtype, proto_dtype):
-        with _test_eager_guard():
         arr = np.random.random([4, 16, 16, 32]).astype(dtype)
         tensor = paddle.to_tensor(arr, dtype)
         self.assertEqual(tensor.dtype, proto_dtype)
@@ -315,7 +307,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         place_list = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
             place_list.append(core.CUDAPlace(0))
-        with _test_eager_guard():
         for p in place_list:
             self.constructor(p)
@@ -639,13 +631,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         place_list = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
             place_list.append(core.CUDAPlace(0))
-        with _test_eager_guard():
         for p in place_list:
             self.constructor_with_kwargs(p)

     def test_copy_and_copy_to(self):
         print("Test_copy_and_copy_to")
-        with _test_eager_guard():
         paddle.set_device("cpu")
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         arr1 = np.zeros([4, 16]).astype('float32')
@@ -701,9 +693,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
             tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
             tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
-            np.testing.assert_array_equal(
-                tensor10.numpy(), tensor11.numpy()
-            )
+            np.testing.assert_array_equal(tensor10.numpy(), tensor11.numpy())
         else:
             tensor3 = tensor2._copy_to(core.CPUPlace(), True)
             np.testing.assert_array_equal(tensor3.numpy(), arr2)
@@ -718,7 +708,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(tensor4.place.is_cpu_place())

     def test_share_buffer_to(self):
-        with _test_eager_guard():
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         arr1 = np.zeros([4, 16]).astype('float32')
         arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
@@ -750,7 +739,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(tensor3._is_shared_buffer_with(tensor))

     def test_share_underline_tensor_to(self):
-        with _test_eager_guard():
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         arr1 = np.zeros([4, 16]).astype('float32')
         arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
@@ -783,7 +771,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
     def test_properties(self):
         print("Test_properties")
-        with _test_eager_guard():
         paddle.set_device("cpu")
         arr = np.ones([4, 16, 16, 32]).astype('float32')
         tensor = paddle.to_tensor(
@@ -808,11 +795,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
     def test_global_properties(self):
         print("Test_global_properties")
-        _disable_legacy_dygraph()
         self.assertTrue(in_dygraph_mode())
-        with _test_eager_guard():
-            self.assertTrue(in_dygraph_mode())
-        self.assertFalse(in_dygraph_mode())

     def test_place_guard(self):
         if core.is_compiled_with_cuda():
@@ -829,7 +812,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         )

     def test_value(self):
-        with _test_eager_guard():
         arr = np.random.rand(4, 16, 16, 32).astype('float64')
         egr_tensor0 = core.eager.Tensor(value=arr)
@@ -854,7 +836,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())

     def test_set_value(self):
-        with _test_eager_guard():
         ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
         egr_tensor = core.eager.Tensor(value=ori_arr)
         self.assertEqual(egr_tensor.stop_gradient, True)
@@ -872,7 +853,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         np.testing.assert_array_equal(egr_tensor.numpy(), new_arr)

     def test_sharding_related_api(self):
-        with _test_eager_guard():
         arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
         egr_tensor1 = core.eager.Tensor(
             arr0, core.CPUPlace(), True, False, "numpy_tensor1", False
@@ -881,7 +861,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)

     def test_copy_gradient_from(self):
-        with _test_eager_guard():
         np_x = np.random.random((2, 2))
         np_y = np.random.random((2, 2))
         x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
@@ -892,7 +871,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         np.testing.assert_array_equal(x.grad.numpy(), np_y)

     def test_clear(self):
-        with _test_eager_guard():
         np_x = np.random.random((3, 8, 8))
         x = paddle.to_tensor(np_x, dtype="float64")
         self.assertTrue(x._is_initialized())
@@ -901,7 +879,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
     def test_use_gpudnn(self):
         np_x = np.random.random((3, 8, 8))
-        with _test_eager_guard():
         self.assertTrue(in_dygraph_mode())
         x = paddle.to_tensor(np_x, dtype="float64")
         y = x._use_gpudnn(False)
@@ -909,20 +887,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         y = x._use_gpudnn(True)
         np.testing.assert_array_equal(x.numpy(), y.numpy())
-        self.assertFalse(in_dygraph_mode())
-        x = paddle.to_tensor(np_x, dtype="float64")
-        with self.assertRaises(AttributeError):
-            x = x._use_gpudnn(False)


 class EagerParamBaseUsageTestCase(unittest.TestCase):
     def test_print(self):
-        with _test_eager_guard():
         linear = paddle.nn.Linear(3, 3, bias_attr=False)
         print(linear.weight)

     def test_copy(self):
-        with _test_eager_guard():
         linear = paddle.nn.Linear(1, 3)
         linear_copy = copy.deepcopy(linear)
         linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
@@ -963,18 +934,6 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         paddle.set_default_dtype("float32")
         return res

-    def test_fp16_initializer(self):
-        res1 = list()
-        res2 = list()
-        paddle.seed(102)
-        paddle.framework.random._manual_program_seed(102)
-        with _test_eager_guard():
-            res1 = self.func_fp16_initilaizer()
-        res2 = self.func_fp16_initilaizer()
-        for i in range(len(res1)):
-            np.testing.assert_array_equal(res1[i], res2[i])
-
     def func_layer_helper_base(self, value):
         base = paddle.fluid.layer_helper_base.LayerHelperBase(
             "test_layer", "test_layer"
@@ -984,20 +943,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
     def func_base_to_variable(self, value):
         paddle.fluid.dygraph.base.to_variable(value)

-    def test_to_variable(self):
-        value = np.random.rand(4, 16, 16, 32).astype('float32')
-        res1 = None
-        res3 = None
-        with _test_eager_guard():
-            res1 = self.func_layer_helper_base(value)
-            res3 = self.func_base_to_variable(value)
-        res2 = self.func_layer_helper_base(value)
-        res4 = self.func_base_to_variable(value)
-        np.testing.assert_array_equal(res1, res2)
-        np.testing.assert_array_equal(res3, res4)
-
     def test_backward_with_single_tensor(self):
-        with _test_eager_guard():
         arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
         egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
         egr_tensor12.retain_grads()
@@ -1015,7 +961,6 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         np.testing.assert_array_equal(egr_tensor12.gradient(), arr)

     def test_set_value(self):
-        with _test_eager_guard():
         linear = paddle.nn.Linear(1, 3)
         ori_place = linear.weight.place
         new_weight = np.ones([1, 3]).astype('float32')
@@ -1026,12 +971,5 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
         self.assertTrue(linear.weight.place._equals(ori_place))


-class EagerGuardTestCase(unittest.TestCase):
-    def test__test_eager_guard(self):
-        tracer = paddle.fluid.dygraph.tracer.Tracer()
-        with _test_eager_guard(tracer):
-            self.assertTrue(in_dygraph_mode())
-
-
 if __name__ == "__main__":
     unittest.main()
@@ -73,7 +73,6 @@ class LinalgLstsqTestCase(unittest.TestCase):
     def test_eager_dygraph(self):
         paddle.disable_static()
-        paddle.fluid.framework._disable_legacy_dygraph()
         for dev in self.devices:
             paddle.set_device(dev)
             place = paddle.CPUPlace() if dev == "cpu" else paddle.CUDAPlace(0)
...