diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 82f5b1922c6e97ee73a187e838350a965f1fd269..b4fd2a8989632e1aad99ee777ec26ba1146fa1e7 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -53,7 +53,7 @@ static size_t UniqueIntegerGenerator(const std::string &prefix) {
   return generators[prefix].fetch_add(1);
 }
 
-bool IsCompileGPU() {
+bool IsCompiledWithCUDA() {
 #ifndef PADDLE_WITH_CUDA
   return false;
 #else
@@ -431,7 +431,7 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("init_glog", framework::InitGLOG);
   m.def("init_devices", &framework::InitDevices);
 
-  m.def("is_compile_gpu", IsCompileGPU);
+  m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
   m.def("set_feed_variable", framework::SetFeedVariable);
   m.def("get_fetch_variable", framework::GetFetchVariable);
 
diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py
index 1f041c74597637a7b74e9690a60b6cd8fdd21cf8..787416aed1acf81138df06110317614dfe77fb48 100644
--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
@@ -89,7 +89,7 @@ def __bootstrap__():
     read_env_flags = [
         'use_pinned_memory', 'check_nan_inf', 'do_memory_benchmark'
     ]
-    if core.is_compile_gpu():
+    if core.is_compiled_with_cuda():
         read_env_flags += ['fraction_of_gpu_memory_to_use', 'op_sync']
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py
index 56f54de86f680653fbd97a7ce1d3f547d1657587..3f6d7070c2987d0557c60db84a2c679cd2cfe36b 100644
--- a/python/paddle/v2/fluid/tests/op_test.py
+++ b/python/paddle/v2/fluid/tests/op_test.py
@@ -334,7 +334,7 @@ class OpTest(unittest.TestCase):
 
     def check_output(self, atol=1e-5):
         places = [core.CPUPlace()]
-        if core.is_compile_gpu() and core.op_support_gpu(self.op_type):
+        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
             places.append(core.CUDAPlace(0))
         for place in places:
             self.check_output_with_place(place, atol)
@@ -367,7 +367,7 @@ class OpTest(unittest.TestCase):
                    max_relative_error=0.005,
                    user_defined_grads=None):
         places = [core.CPUPlace()]
-        if core.is_compile_gpu() and core.op_support_gpu(self.op_type):
+        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
             places.append(core.CUDAPlace(0))
         for place in places:
             self.check_grad_with_place(place, inputs_to_check, output_names,
diff --git a/python/paddle/v2/fluid/tests/test_adagrad_op.py b/python/paddle/v2/fluid/tests/test_adagrad_op.py
index 86b0567ce123b00bace639fb8fe76cf3894abd6d..3556bcf8ba0d7f16b1d9bf50e46aebde83de2e25 100644
--- a/python/paddle/v2/fluid/tests/test_adagrad_op.py
+++ b/python/paddle/v2/fluid/tests/test_adagrad_op.py
@@ -180,7 +180,7 @@ class TestSparseAdagradOp(unittest.TestCase):
 
     def test_sparse_adagrad(self):
         places = [core.CPUPlace()]
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
         for place in places:
             self.check_with_place(place)
diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py
index 10580adca714beeb7571312b8fdc4235ecaaccfe..df1fa8983c1984a9bb9f204aded148c17d3d609d 100644
--- a/python/paddle/v2/fluid/tests/test_adam_op.py
+++ b/python/paddle/v2/fluid/tests/test_adam_op.py
@@ -305,7 +305,7 @@ class TestSparseAdamOp(unittest.TestCase):
 
     def test_sparse_sgd(self):
         places = [core.CPUPlace()]
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
         for place in places:
             self.check_with_place(place)
diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
index 371bd426781b457582e74c33c80c46b5d56946fa..cf13166f255c782bdcec622d58d073a0943c8e1e 100644
--- a/python/paddle/v2/fluid/tests/test_batch_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
@@ -352,7 +352,7 @@ class TestBatchNormOp(OpTest):
             print "op test backward passed: ", str(place), data_layout
 
         places = [core.CPUPlace()]
-        if core.is_compile_gpu() and core.op_support_gpu("batch_norm"):
+        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
             places.append(core.CUDAPlace(0))
 
         for place in places:
diff --git a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
index 82842534d4ac7ad8b0a8e0d877c6a638fb53cadc..79beb8b1fcef610bc2f3e8d18da4345baa9b99c3 100644
--- a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
+++ b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
@@ -33,7 +33,7 @@ class TestGaussianRandomOp(unittest.TestCase):
         self.gaussian_random_test(place=fluid.CPUPlace())
 
     def test_gpu(self):
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             self.gaussian_random_test(place=fluid.CUDAPlace(0))
 
     def gaussian_random_test(self, place):
diff --git a/python/paddle/v2/fluid/tests/test_normalization_wrapper.py b/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
index 57f14f6b9cc9c7cf9ae93274cf3d7763350e6e10..6b71f2a923f0cf0744d6b2190aa35830dcf15f24 100644
--- a/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
+++ b/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
@@ -46,7 +46,7 @@ class TestNormalization(unittest.TestCase):
         """Run the test program.
         """
         places = [core.CPUPlace()]
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
 
         for place in places:
diff --git a/python/paddle/v2/fluid/tests/test_op_support_gpu.py b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
index 34939818126b1d747fb76861bbd691894fb3759b..7de02a8fda22a3db82a2e0b5e6fa9c9f2718fa12 100644
--- a/python/paddle/v2/fluid/tests/test_op_support_gpu.py
+++ b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
@@ -18,7 +18,8 @@ import paddle.v2.fluid.core as core
 
 class TestOpSupportGPU(unittest.TestCase):
     def test_case(self):
-        self.assertEqual(core.is_compile_gpu(), core.op_support_gpu("sum"))
+        self.assertEqual(core.is_compiled_with_cuda(),
+                         core.op_support_gpu("sum"))
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py
index dfde492c7cd930615c030bb0c8e5a2cf36ff59a8..3b86ccb6172f8757b854ab6af9ee07ff3274fee4 100644
--- a/python/paddle/v2/fluid/tests/test_parallel_op.py
+++ b/python/paddle/v2/fluid/tests/test_parallel_op.py
@@ -53,7 +53,7 @@ class BaseParallelForTest(unittest.TestCase):
             fetch=fetch,
             place=cpu,
             use_parallel=True)
-        if fluid.core.is_compile_gpu():
+        if fluid.core.is_compiled_with_cuda():
             gpu = fluid.CUDAPlace(0)
             result_gpu = self._run_test_impl_(
                 callback=callback,
diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/test_profiler.py
index 34700df37d22cf71bad2d86efa4718a3767c2d4f..09b2d08401878448b4b3f3c6c03193e255e9ffeb 100644
--- a/python/paddle/v2/fluid/tests/test_profiler.py
+++ b/python/paddle/v2/fluid/tests/test_profiler.py
@@ -23,7 +23,7 @@ import paddle.v2.fluid.core as core
 
 class TestProfiler(unittest.TestCase):
     def test_nvprof(self):
-        if not fluid.core.is_compile_gpu():
+        if not fluid.core.is_compiled_with_cuda():
             return
         epoc = 8
         dshape = [4, 3, 28, 28]
@@ -42,7 +42,7 @@ class TestProfiler(unittest.TestCase):
         os.remove(output_file)
 
     def net_profiler(self, state):
-        if state == 'GPU' and not core.is_compile_gpu():
+        if state == 'GPU' and not core.is_compiled_with_cuda():
             return
         startup_program = fluid.Program()
         main_program = fluid.Program()
diff --git a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
index 74cd6de9e6fde70c001bb2189c4976cdd8e34633..0a223bac0ce8fd626881cef983c7cd960f2c5ba8 100644
--- a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
+++ b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
@@ -45,7 +45,7 @@ class TestReorderLoDTensor(unittest.TestCase):
         outputs = []
         input_grads = []
         places = [core.CPUPlace()]
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
         for place in places:
             self.set_inputs(place)
diff --git a/python/paddle/v2/fluid/tests/test_sgd_op.py b/python/paddle/v2/fluid/tests/test_sgd_op.py
index f87927968b0fdb00ec207ff1d52be9e0d81af139..ba2ca1683f9f6d72bbd1550df89c7424d223a1d9 100644
--- a/python/paddle/v2/fluid/tests/test_sgd_op.py
+++ b/python/paddle/v2/fluid/tests/test_sgd_op.py
@@ -91,7 +91,7 @@ class TestSparseSGDOp(unittest.TestCase):
 
     def test_sparse_sgd(self):
         places = [core.CPUPlace()]
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
         for place in places:
             self.check_with_place(place)
diff --git a/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py b/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py
index 37c6587c4151a89563f93cab35d63b2419ef88ab..343aa20066146ae08462a92f1efaa20c4d4b5ed8 100644
--- a/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py
+++ b/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py
@@ -21,7 +21,7 @@ from paddle.v2.fluid.op import Operator
 
 class TestSpliteSelectedRows(unittest.TestCase):
     def get_places(self):
         places = [core.CPUPlace()]
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
         return places
diff --git a/python/paddle/v2/fluid/tests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/test_uniform_random_op.py
index b2a39f975eb461292dc2e7be332a26931684bf90..94cf416fad8f02cdea8017ae1350fa264ce644b1 100644
--- a/python/paddle/v2/fluid/tests/test_uniform_random_op.py
+++ b/python/paddle/v2/fluid/tests/test_uniform_random_op.py
@@ -36,7 +36,7 @@ class TestUniformRandomOp(unittest.TestCase):
         self.uniform_random_test(place=core.CPUPlace())
 
     def test_gpu(self):
-        if core.is_compile_gpu():
+        if core.is_compiled_with_cuda():
             self.uniform_random_test(place=core.CUDAPlace(0))
 
     def uniform_random_test(self, place):
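Every call site above converges on the same gating pattern: build a list of places that always contains the CPU, and append `CUDAPlace(0)` only when the binary was compiled with CUDA (and, for op tests, when the op actually has a GPU kernel). Below is a minimal sketch of that pattern using only the APIs this diff touches; the `get_places` helper itself is hypothetical and not part of the change:

```python
import paddle.v2.fluid.core as core


def get_places(op_type=None):
    # Always test on CPU; add the first CUDA device only when the
    # binary was built with CUDA and (if an op type is given) the
    # op has a GPU kernel registered.
    places = [core.CPUPlace()]
    if core.is_compiled_with_cuda() and (op_type is None or
                                         core.op_support_gpu(op_type)):
        places.append(core.CUDAPlace(0))
    return places


if __name__ == '__main__':
    # On a CUDA build this prints both places; on a CPU-only build
    # it silently falls back to just the CPUPlace.
    for place in get_places("batch_norm"):
        print(place)
```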