diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 17d210cc2f176c1e9a82b5c05d8ccc9bd498e1a3..6f0bd5fb16d14e1d8302af4c9dd543df8e9254e2 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1450,28 +1450,28 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-static PyObject* tensor__use_cudnn(TensorObject* self,
-                                   PyObject* args,
-                                   PyObject* kwargs) {
+static PyObject* tensor__use_gpudnn(TensorObject* self,
+                                    PyObject* args,
+                                    PyObject* kwargs) {
   EAGER_TRY
   PADDLE_ENFORCE(self->tensor.defined() && self->tensor.is_dense_tensor(),
                  paddle::platform::errors::Fatal(
-                     "function _use_cudnn is only effective for DenseTensor"));
+                     "function _use_gpudnn is only effective for DenseTensor"));
 
-  bool use_cudnn = pybind::CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
+  bool use_gpudnn = pybind::CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
 
-  // Set the same use_cudnn attribute, return directly
+  // Set the same use_gpudnn attribute, return directly
   phi::DenseTensor* dense_tensor =
       static_cast<phi::DenseTensor*>(self->tensor.impl().get());
   phi::DenseTensorMeta* dense_tensor_meta =
       phi::DenseTensorUtils::GetMutableMeta(dense_tensor);
-  if (use_cudnn == dense_tensor_meta->use_cudnn) {
+  if (use_gpudnn == dense_tensor_meta->use_gpudnn) {
     return ToPyObject(self->tensor);
   }
 
-  // Share all other members of Tensor except use_cudnn
+  // Share all other members of Tensor except use_gpudnn
   phi::DenseTensorMeta target_dense_meta = *dense_tensor_meta;
-  target_dense_meta.use_cudnn = use_cudnn;
+  target_dense_meta.use_gpudnn = use_gpudnn;
   phi::DenseTensor target_dense_tensor;
   target_dense_tensor.ShareDataWith(*dense_tensor);
   target_dense_tensor.set_meta(target_dense_meta);
@@ -1481,7 +1481,7 @@ static PyObject* tensor__use_cudnn(TensorObject* self,
                        self->tensor.name());
   target_tensor.set_autograd_meta(self->tensor.mutable_autograd_meta());
   VLOG(4) << "Tensor: " << target_tensor.name()
-          << " set use_cudnn = " << use_cudnn;
+          << " set use_gpudnn = " << use_gpudnn;
 
   return ToPyObject(target_tensor);
   EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -2053,8 +2053,8 @@ PyMethodDef variable_methods[] = {
      (PyCFunction)(void (*)(void))tensor__copy_gradient_from,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
-    {"_tensor_use_cudnn",
-     (PyCFunction)(void (*)(void))tensor__use_cudnn,
+    {"_tensor_use_gpudnn",
+     (PyCFunction)(void (*)(void))tensor__use_gpudnn,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /** the methods to adapt old dygraph, will be removed in the future **/
diff --git a/paddle/phi/api/lib/kernel_dispatch.cc b/paddle/phi/api/lib/kernel_dispatch.cc
index 941bc880b99f0b351f34ebad76032a5a42455c0f..074da80bbfb6e9d09eb2f5c87094be103bd68c27 100644
--- a/paddle/phi/api/lib/kernel_dispatch.cc
+++ b/paddle/phi/api/lib/kernel_dispatch.cc
@@ -57,7 +57,7 @@ BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
   phi::Backend backend_key = phi::TransToPhiBackend(t.place());
   BackendSet backend_set(backend_key);
   if (backend_key == Backend::GPU && phi::DenseTensor::classof(&t) &&
-      static_cast<const phi::DenseTensor&>(t).meta().use_cudnn) {
+      static_cast<const phi::DenseTensor&>(t).meta().use_gpudnn) {
     backend_set = backend_set | BackendSet(Backend::GPUDNN);
   }
   return backend_set;
@@ -126,7 +126,7 @@ Backend ParseBackend(const Tensor& tensor) {
   Backend backend_key = phi::TransToPhiBackend(tensor.place());
   if (backend_key == Backend::GPU &&
       phi::DenseTensor::classof(tensor.impl().get()) &&
-      static_cast<phi::DenseTensor*>(tensor.impl().get())->meta().use_cudnn) {
+      static_cast<phi::DenseTensor*>(tensor.impl().get())->meta().use_gpudnn) {
     return Backend::GPUDNN;
   }
   return backend_key;
diff --git a/paddle/phi/api/lib/kernel_dispatch.h b/paddle/phi/api/lib/kernel_dispatch.h
index bfe8eba2444b6c5fc94962427f225f050de114c8..23b375eaf6ebee6ab91305e1cd8c75c70143fb1f 100644
--- a/paddle/phi/api/lib/kernel_dispatch.h
+++ b/paddle/phi/api/lib/kernel_dispatch.h
@@ -90,7 +90,7 @@ struct ArgsIterator {
 
 struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
   KernelKeySet key_set;
-  bool disable_cudnn = false;
+  bool disable_gpudnn = false;
   // this dtype_set is used for cache multi-inputs dtype and used for
   // data_promote
   DataTypeSet dtype_set{DataType::UNDEFINED};
@@ -101,9 +101,9 @@
     // assign Backend
     BackendSet tensor_backend_set = detail::GetTensorBackendSet(tensor);
     key_set.backend_set = key_set.backend_set | tensor_backend_set;
-    // tensor's attribute use_cudnn=False, explicitly disable cudnn kernel
-    if (tensor_backend_set == BackendSet(Backend::GPU) || disable_cudnn) {
-      disable_cudnn = true;
+    // tensor's attribute use_gpudnn=False, explicitly disable gpudnn kernel
+    if (tensor_backend_set == BackendSet(Backend::GPU) || disable_gpudnn) {
+      disable_gpudnn = true;
       key_set.backend_set = key_set.backend_set - BackendSet(Backend::GPUDNN);
     }
     // assign DataLayout
diff --git a/paddle/phi/core/dense_tensor.cc b/paddle/phi/core/dense_tensor.cc
index 09ce2414150e1c5f9d737e39ec4cd9197d6772a4..3c6f306e8c8f0dfa1625c31391ff553f6e3a1a77 100644
--- a/paddle/phi/core/dense_tensor.cc
+++ b/paddle/phi/core/dense_tensor.cc
@@ -200,7 +200,7 @@ void DenseTensor::set_meta(const DenseTensorMeta& meta) {
   meta_.layout = meta.layout;
   meta_.lod = meta.lod;
   meta_.offset = meta.offset;
-  meta_.use_cudnn = meta.use_cudnn;
+  meta_.use_gpudnn = meta.use_gpudnn;
 }
 
 /* @jim19930609: This interface will be further modified until we finalized the
diff --git a/paddle/phi/core/dense_tensor_impl.cc b/paddle/phi/core/dense_tensor_impl.cc
index 3906282187d4c684b15b85691ad32d68fb5dd1ed..2ddbaa589e6f6293ce370b18b8feb23e401a7c36 100644
--- a/paddle/phi/core/dense_tensor_impl.cc
+++ b/paddle/phi/core/dense_tensor_impl.cc
@@ -357,7 +357,7 @@ DenseTensor& DenseTensor::ShareDataWith(const DenseTensor& src) {
   meta_.dtype = src.meta_.dtype;
   meta_.layout = src.meta_.layout;
   meta_.offset = src.meta_.offset;
-  meta_.use_cudnn = src.meta_.use_cudnn;
+  meta_.use_gpudnn = src.meta_.use_gpudnn;
   storage_properties_ =
       std::move(CopyStorageProperties(src.storage_properties_));
 #ifdef PADDLE_WITH_MKLDNN
diff --git a/paddle/phi/core/tensor_meta.cc b/paddle/phi/core/tensor_meta.cc
index 44b2dee358ad5fa1cf94e73235f2c25b57618c90..cb2867c1dbee105ae143b5517c5edaf214aa7437 100644
--- a/paddle/phi/core/tensor_meta.cc
+++ b/paddle/phi/core/tensor_meta.cc
@@ -16,11 +16,11 @@ limitations under the License. */
 
 namespace phi {
 
-DenseTensorMeta::DenseTensorMeta() { use_cudnn = true; }
+DenseTensorMeta::DenseTensorMeta() { use_gpudnn = true; }
 
 DenseTensorMeta::DenseTensorMeta(DataType dtype, const DDim& dims)
     : dims(dims), dtype(dtype) {
-  use_cudnn = true;
+  use_gpudnn = true;
 }
 
 DenseTensorMeta::DenseTensorMeta(DataType dtype,
@@ -28,7 +28,7 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  DataLayout layout,
                                  size_t offset)
     : dims(dims), dtype(dtype), layout(layout), offset(offset) {
-  use_cudnn = true;
+  use_gpudnn = true;
 }
 
 DenseTensorMeta::DenseTensorMeta(DataType dtype,
@@ -37,7 +37,7 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  const LoD& lod,
                                  size_t offset)
     : dims(dims), dtype(dtype), layout(layout), lod(lod), offset(offset) {
-  use_cudnn = true;
+  use_gpudnn = true;
 }
 
 bool DenseTensorMeta::valid() const noexcept {
diff --git a/paddle/phi/core/tensor_meta.h b/paddle/phi/core/tensor_meta.h
index 789a4422e25d1f8f128bbb31bfd5fa766d5b2fcd..1d969ef7b3e132f6b13ac7a35873697d6b3e341a 100644
--- a/paddle/phi/core/tensor_meta.h
+++ b/paddle/phi/core/tensor_meta.h
@@ -65,9 +65,9 @@ struct DenseTensorMeta {
   bool valid() const noexcept;
 
   bool is_scalar{false};
-  /// \brief Determine whether using CuDNN speed-up library in the new dygraph.
+  /// \brief Determine whether using gpudnn speed-up library in the new dygraph.
   /// It maybe also support MKLDNN library in the near future.
-  bool use_cudnn{true};
+  bool use_gpudnn{true};
   DDim dims;
   DataType dtype{DataType::UNDEFINED};
   DataLayout layout{DataLayout::NCHW};
@@ -76,7 +76,7 @@ struct DenseTensorMeta {
 };
 
 inline bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
-  return (lhs.is_scalar == rhs.is_scalar) && lhs.use_cudnn == rhs.use_cudnn &&
+  return (lhs.is_scalar == rhs.is_scalar) && lhs.use_gpudnn == rhs.use_gpudnn &&
          (lhs.dims == rhs.dims) && (lhs.dtype == rhs.dtype) &&
          (lhs.layout == rhs.layout) && (lhs.lod == rhs.lod) &&
          (lhs.offset == rhs.offset);
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 8ad463f2d3ad3dc235a2ab25451eede899123009..bff3d5aacb935bb9d05d064d1427f6debc9932cc 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -671,7 +671,7 @@ class Pool2D(layers.Layer):
     def forward(self, input):
         if _non_static_mode():
             if not self._use_mkldnn and in_dygraph_mode():
-                input = input._use_cudnn(self._use_cudnn)
+                input = input._use_gpudnn(self._use_cudnn)
             return _C_ops.pool2d(
                 input,
                 self._pool_size,
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index ee57dc8cc2c4cbef777f083e1b0727f8b54ce3c5..e9b963a781db9fb6578d2f912cbfdb08b7c30383 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -886,8 +886,8 @@ def monkey_patch_varbase():
         self.get_tensor()._clear()
 
     @framework.dygraph_only
-    def _use_cudnn(self, use_cudnn=True):
-        return self._tensor_use_cudnn(use_cudnn)
+    def _use_gpudnn(self, use_gpudnn=True):
+        return self._tensor_use_gpudnn(use_gpudnn)
 
     @framework.dygraph_only
     def _uva(self, device_id=0):
@@ -1073,7 +1073,7 @@ def monkey_patch_varbase():
         setattr(core.eager.Tensor, "_uva", _uva)
         setattr(core.eager.Tensor, "_clear_data", _clear_data)
         setattr(core.eager.Tensor, "__hash__", __hash__)
-        setattr(core.eager.Tensor, "_use_cudnn", _use_cudnn)
+        setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn)
     else:
         setattr(core.VarBase, "__name__", "Tensor")
         setattr(core.VarBase, "grad", grad)
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index e066be12eb78417ccd3908e1945505b19b8837bf..b59e0c4c800e8d64b55413ef7ac7fcd077067a86 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1852,7 +1852,7 @@ def pool2d(
         pool_padding = update_padding(pool_padding, data_format)
 
     if in_dygraph_mode():
-        input = input._use_cudnn(use_cudnn)
+        input = input._use_gpudnn(use_cudnn)
         return _C_ops.pool2d(
             input,
             pool_size,
diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
index 25cdfe82af39b6e547c29f201ab68707c6bb83f5..c5ecca10b2e550bce78c23fa4e24a3b8a7cf7a77 100644
--- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py
+++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py
@@ -899,20 +899,20 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
             x._clear()
             self.assertFalse(x._is_initialized())
 
-    def test_use_cudnn(self):
+    def test_use_gpudnn(self):
         np_x = np.random.random((3, 8, 8))
         with _test_eager_guard():
             self.assertTrue(in_dygraph_mode())
             x = paddle.to_tensor(np_x, dtype="float64")
-            y = x._use_cudnn(False)
+            y = x._use_gpudnn(False)
             np.testing.assert_array_equal(x.numpy(), y.numpy())
-            y = x._use_cudnn(True)
+            y = x._use_gpudnn(True)
             np.testing.assert_array_equal(x.numpy(), y.numpy())
 
         self.assertFalse(in_dygraph_mode())
         x = paddle.to_tensor(np_x, dtype="float64")
         with self.assertRaises(AttributeError):
-            x = x._use_cudnn(False)
+            x = x._use_gpudnn(False)
 
 
 class EagerParamBaseUsageTestCase(unittest.TestCase):
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index 3b8660a677cba9034b8052f4193822c22bfdbb32..9f92a6057b592b0141726ffcb8e1b3539efe4517 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -1690,7 +1690,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
     x = unsqueeze(x, [2])
 
     if in_dygraph_mode():
-        x = x._use_cudnn(False)
+        x = x._use_gpudnn(False)
         pool_out = _C_ops.pool2d(
             x,
             pool_size,
@@ -1827,7 +1827,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
         output_size = utils._convert_to_tensor_list(output_size)
 
     if in_dygraph_mode():
-        x = x._use_cudnn(False)
+        x = x._use_gpudnn(False)
         return _C_ops.pool2d(
             x,
             output_size,
@@ -1972,7 +1972,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
         output_size[2] = in_w
 
     if in_dygraph_mode():
-        x = x._use_cudnn(False)
+        x = x._use_gpudnn(False)
         return _C_ops.pool3d(
             x,
             output_size,
diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py
index 54ed9903744e1b81ed2b8500969579369a8df9d4..c01f962d79dbc2c8ae08c85d8c47c7f6a75a20a2 100644
--- a/python/paddle/nn/functional/vision.py
+++ b/python/paddle/nn/functional/vision.py
@@ -91,7 +91,7 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
             if isinstance(out_shape, Variable)
            else out_shape
         )
-        theta = theta._use_cudnn(use_cudnn)
+        theta = theta._use_gpudnn(use_cudnn)
         return _C_ops.affine_grid(theta, _out_shape, align_corners)
     elif in_dynamic_mode():
         _out_shape = (
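
Note (not part of the patch): a minimal usage sketch of the renamed switch, mirroring the `test_use_gpudnn` unit test above. It assumes a CUDA/ROCm build of Paddle with this patch applied, running in eager (dygraph) mode, where `_use_gpudnn` is exposed:

```python
# Sketch only: assumes a GPU build of Paddle at the state of this patch;
# _use_gpudnn is a dygraph-only, private API.
import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((3, 8, 8)), dtype="float64")

# Returns a tensor sharing x's storage but with meta.use_gpudnn = False,
# so kernel dispatch falls back from GPUDNN to plain GPU kernels.
y = x._use_gpudnn(False)
np.testing.assert_array_equal(x.numpy(), y.numpy())

# Flipping the flag back prefers the cuDNN/MIOpen (GPUDNN) kernels again.
z = y._use_gpudnn(True)
np.testing.assert_array_equal(x.numpy(), z.numpy())
```

Only the `use_gpudnn` bit of `DenseTensorMeta` differs between `x`, `y`, and `z`; the underlying buffer is shared via `ShareDataWith` in `tensor__use_gpudnn`, which is why the arrays compare equal.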