Unverified commit 41f15537, authored by HongyuJia, committed by GitHub

rename use_cudnn to use_gpudnn in phi (#48443)

Parent: a559a664
@@ -1450,28 +1450,28 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
-static PyObject* tensor__use_cudnn(TensorObject* self,
+static PyObject* tensor__use_gpudnn(TensorObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
   EAGER_TRY
   PADDLE_ENFORCE(self->tensor.defined() && self->tensor.is_dense_tensor(),
                  paddle::platform::errors::Fatal(
-                     "function _use_cudnn is only effective for DenseTensor"));
+                     "function _use_gpudnn is only effective for DenseTensor"));
-  bool use_cudnn = pybind::CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
-  // Set the same use_cudnn attribute, return directly
+  bool use_gpudnn = pybind::CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
+  // Set the same use_gpudnn attribute, return directly
   phi::DenseTensor* dense_tensor =
       static_cast<phi::DenseTensor*>(self->tensor.impl().get());
   phi::DenseTensorMeta* dense_tensor_meta =
       phi::DenseTensorUtils::GetMutableMeta(dense_tensor);
-  if (use_cudnn == dense_tensor_meta->use_cudnn) {
+  if (use_gpudnn == dense_tensor_meta->use_gpudnn) {
     return ToPyObject(self->tensor);
   }
-  // Share all other members of Tensor except use_cudnn
+  // Share all other members of Tensor except use_gpudnn
   phi::DenseTensorMeta target_dense_meta = *dense_tensor_meta;
-  target_dense_meta.use_cudnn = use_cudnn;
+  target_dense_meta.use_gpudnn = use_gpudnn;
   phi::DenseTensor target_dense_tensor;
   target_dense_tensor.ShareDataWith(*dense_tensor);
   target_dense_tensor.set_meta(target_dense_meta);
@@ -1481,7 +1481,7 @@ static PyObject* tensor__use_cudnn(TensorObject* self,
       self->tensor.name());
   target_tensor.set_autograd_meta(self->tensor.mutable_autograd_meta());
   VLOG(4) << "Tensor: " << target_tensor.name()
-          << " set use_cudnn = " << use_cudnn;
+          << " set use_gpudnn = " << use_gpudnn;
   return ToPyObject(target_tensor);
   EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -2053,8 +2053,8 @@ PyMethodDef variable_methods[] = {
      (PyCFunction)(void (*)(void))tensor__copy_gradient_from,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
-    {"_tensor_use_cudnn",
-     (PyCFunction)(void (*)(void))tensor__use_cudnn,
+    {"_tensor_use_gpudnn",
+     (PyCFunction)(void (*)(void))tensor__use_gpudnn,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
     /** the methods to adapt old dygraph, will be removed in the future **/
......
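A minimal sketch of what the renamed binding does, written here in Python for orientation (the `FakeTensor`/`Meta` types and helpers are illustrative stand-ins, not Paddle API): the method is a no-op when the flag already matches, and otherwise returns a tensor that shares the buffer with the original and differs only in the `use_gpudnn` dispatch flag.

```python
from dataclasses import dataclass, field, replace

@dataclass
class Meta:
    use_gpudnn: bool = True          # mirrors DenseTensorMeta's default

@dataclass
class FakeTensor:                    # illustrative stand-in, not Paddle API
    data: list                       # shared buffer stand-in
    meta: Meta = field(default_factory=Meta)

def use_gpudnn(t: FakeTensor, flag: bool) -> FakeTensor:
    if t.meta.use_gpudnn == flag:    # same flag: return the input unchanged
        return t
    # share the buffer, copy the meta, flip only the dispatch flag
    return FakeTensor(data=t.data, meta=replace(t.meta, use_gpudnn=flag))

x = FakeTensor(data=[1.0, 2.0])
y = use_gpudnn(x, False)
assert y.data is x.data and not y.meta.use_gpudnn and x.meta.use_gpudnn
```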
@@ -57,7 +57,7 @@ BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
   phi::Backend backend_key = phi::TransToPhiBackend(t.place());
   BackendSet backend_set(backend_key);
   if (backend_key == Backend::GPU && phi::DenseTensor::classof(&t) &&
-      static_cast<const phi::DenseTensor&>(t).meta().use_cudnn) {
+      static_cast<const phi::DenseTensor&>(t).meta().use_gpudnn) {
     backend_set = backend_set | BackendSet(Backend::GPUDNN);
   }
   return backend_set;
@@ -126,7 +126,7 @@ Backend ParseBackend(const Tensor& tensor) {
   Backend backend_key = phi::TransToPhiBackend(tensor.place());
   if (backend_key == Backend::GPU &&
       phi::DenseTensor::classof(tensor.impl().get()) &&
-      static_cast<phi::DenseTensor*>(tensor.impl().get())->meta().use_cudnn) {
+      static_cast<phi::DenseTensor*>(tensor.impl().get())->meta().use_gpudnn) {
     return Backend::GPUDNN;
   }
   return backend_key;
......
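The rule the two hunks above implement: a GPU-placed DenseTensor contributes the GPUDNN backend to kernel dispatch only while its meta flag is set. A runnable sketch of that rule, with made-up bit positions (`BackendSet` is a bitset in the C++ code; the constants here are illustrative, not Paddle's):

```python
GPU, GPUDNN = 1 << 0, 1 << 1       # illustrative bit positions, not Paddle's

def tensor_backend_set(on_gpu: bool, use_gpudnn: bool) -> int:
    backend_set = GPU if on_gpu else 0
    if on_gpu and use_gpudnn:      # GetTensorBackendSet's extra GPUDNN bit
        backend_set |= GPUDNN
    return backend_set

assert tensor_backend_set(True, True) == GPU | GPUDNN
assert tensor_backend_set(True, False) == GPU
```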
@@ -90,7 +90,7 @@ struct ArgsIterator {
 struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
   KernelKeySet key_set;
-  bool disable_cudnn = false;
+  bool disable_gpudnn = false;
   // this dtype_set is used for cache multi-inputs dtype and used for
   // data_promote
   DataTypeSet dtype_set{DataType::UNDEFINED};
@@ -101,9 +101,9 @@ struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
     // assign Backend
     BackendSet tensor_backend_set = detail::GetTensorBackendSet(tensor);
     key_set.backend_set = key_set.backend_set | tensor_backend_set;
-    // tensor's attribute use_cudnn=False, explicitly disable cudnn kernel
-    if (tensor_backend_set == BackendSet(Backend::GPU) || disable_cudnn) {
-      disable_cudnn = true;
+    // tensor's attribute use_gpudnn=False, explicitly disable gpudnn kernel
+    if (tensor_backend_set == BackendSet(Backend::GPU) || disable_gpudnn) {
+      disable_gpudnn = true;
       key_set.backend_set = key_set.backend_set - BackendSet(Backend::GPUDNN);
     }
     // assign DataLayout
......
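Note the sticky semantics in the hunk above: once any input tensor arrives with a plain-GPU backend set (i.e. its `use_gpudnn` is false), `disable_gpudnn` latches and GPUDNN is subtracted from the kernel key for every subsequent input as well. A runnable sketch, again with illustrative bit constants:

```python
GPU, GPUDNN = 1 << 0, 1 << 1         # illustrative bit positions, as before

def merge_backend_sets(per_tensor_sets):
    key_set, disable_gpudnn = 0, False
    for s in per_tensor_sets:
        key_set |= s
        if s == GPU or disable_gpudnn:   # one opted-out tensor latches the flag
            disable_gpudnn = True
            key_set &= ~GPUDNN           # subtract GPUDNN from the kernel key
    return key_set

# A gpudnn-enabled input followed by an opted-out one ends up plain GPU:
assert merge_backend_sets([GPU | GPUDNN, GPU]) == GPU
```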
@@ -200,7 +200,7 @@ void DenseTensor::set_meta(const DenseTensorMeta& meta) {
   meta_.layout = meta.layout;
   meta_.lod = meta.lod;
   meta_.offset = meta.offset;
-  meta_.use_cudnn = meta.use_cudnn;
+  meta_.use_gpudnn = meta.use_gpudnn;
 }
 /* @jim19930609: This interface will be further modified until we finalized the
......
@@ -357,7 +357,7 @@ DenseTensor& DenseTensor::ShareDataWith(const DenseTensor& src) {
   meta_.dtype = src.meta_.dtype;
   meta_.layout = src.meta_.layout;
   meta_.offset = src.meta_.offset;
-  meta_.use_cudnn = src.meta_.use_cudnn;
+  meta_.use_gpudnn = src.meta_.use_gpudnn;
   storage_properties_ =
       std::move(CopyStorageProperties(src.storage_properties_));
 #ifdef PADDLE_WITH_MKLDNN
......
@@ -16,11 +16,11 @@ limitations under the License. */
 namespace phi {
-DenseTensorMeta::DenseTensorMeta() { use_cudnn = true; }
+DenseTensorMeta::DenseTensorMeta() { use_gpudnn = true; }
 DenseTensorMeta::DenseTensorMeta(DataType dtype, const DDim& dims)
     : dims(dims), dtype(dtype) {
-  use_cudnn = true;
+  use_gpudnn = true;
 }
 DenseTensorMeta::DenseTensorMeta(DataType dtype,
@@ -28,7 +28,7 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  DataLayout layout,
                                  size_t offset)
     : dims(dims), dtype(dtype), layout(layout), offset(offset) {
-  use_cudnn = true;
+  use_gpudnn = true;
 }
 DenseTensorMeta::DenseTensorMeta(DataType dtype,
@@ -37,7 +37,7 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  const LoD& lod,
                                  size_t offset)
     : dims(dims), dtype(dtype), layout(layout), lod(lod), offset(offset) {
-  use_cudnn = true;
+  use_gpudnn = true;
 }
 bool DenseTensorMeta::valid() const noexcept {
......
@@ -65,9 +65,9 @@ struct DenseTensorMeta {
   bool valid() const noexcept;
   bool is_scalar{false};
-  /// \brief Determine whether using CuDNN speed-up library in the new dygraph.
+  /// \brief Determine whether using gpudnn speed-up library in the new dygraph.
   /// It maybe also support MKLDNN library in the near future.
-  bool use_cudnn{true};
+  bool use_gpudnn{true};
   DDim dims;
   DataType dtype{DataType::UNDEFINED};
   DataLayout layout{DataLayout::NCHW};
@@ -76,7 +76,7 @@ struct DenseTensorMeta {
 };
 inline bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
-  return (lhs.is_scalar == rhs.is_scalar) && lhs.use_cudnn == rhs.use_cudnn &&
+  return (lhs.is_scalar == rhs.is_scalar) && lhs.use_gpudnn == rhs.use_gpudnn &&
          (lhs.dims == rhs.dims) && (lhs.dtype == rhs.dtype) &&
          (lhs.layout == rhs.layout) && (lhs.lod == rhs.lod) &&
          (lhs.offset == rhs.offset);
......
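The header changes above keep the flag defaulting to true and fold it into `operator==`, so two metas that differ only in `use_gpudnn` compare unequal. A dataclass sketch of those semantics (an illustrative stand-in, not the C++ type):

```python
from dataclasses import dataclass, replace

@dataclass
class DenseTensorMetaSketch:          # stand-in for phi::DenseTensorMeta
    dims: tuple = ()
    dtype: str = "UNDEFINED"
    layout: str = "NCHW"
    offset: int = 0
    use_gpudnn: bool = True           # default matches the constructors above

a = DenseTensorMetaSketch(dims=(3, 8, 8), dtype="float64")
b = replace(a, use_gpudnn=False)
assert a != b                         # equality now includes the use_gpudnn flag
```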
@@ -671,7 +671,7 @@ class Pool2D(layers.Layer):
     def forward(self, input):
         if _non_static_mode():
             if not self._use_mkldnn and in_dygraph_mode():
-                input = input._use_cudnn(self._use_cudnn)
+                input = input._use_gpudnn(self._use_cudnn)
                 return _C_ops.pool2d(
                     input,
                     self._pool_size,
......
@@ -886,8 +886,8 @@ def monkey_patch_varbase():
             self.get_tensor()._clear()
         @framework.dygraph_only
-        def _use_cudnn(self, use_cudnn=True):
-            return self._tensor_use_cudnn(use_cudnn)
+        def _use_gpudnn(self, use_gpudnn=True):
+            return self._tensor_use_gpudnn(use_gpudnn)
         @framework.dygraph_only
         def _uva(self, device_id=0):
@@ -1073,7 +1073,7 @@ def monkey_patch_varbase():
         setattr(core.eager.Tensor, "_uva", _uva)
         setattr(core.eager.Tensor, "_clear_data", _clear_data)
         setattr(core.eager.Tensor, "__hash__", __hash__)
-        setattr(core.eager.Tensor, "_use_cudnn", _use_cudnn)
+        setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn)
     else:
         setattr(core.VarBase, "__name__", "Tensor")
         setattr(core.VarBase, "grad", grad)
......
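With the patch applied, eager-mode usage looks as follows (dygraph-only per the decorator above; the values are unaffected, only kernel selection changes, which mirrors the unit test further down):

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((3, 8, 8)), dtype="float64")
y = x._use_gpudnn(False)   # same values; gpudnn kernels disabled for y
np.testing.assert_array_equal(x.numpy(), y.numpy())
```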
@@ -1852,7 +1852,7 @@ def pool2d(
     pool_padding = update_padding(pool_padding, data_format)
     if in_dygraph_mode():
-        input = input._use_cudnn(use_cudnn)
+        input = input._use_gpudnn(use_cudnn)
         return _C_ops.pool2d(
             input,
             pool_size,
......
@@ -899,20 +899,20 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         x._clear()
         self.assertFalse(x._is_initialized())
-    def test_use_cudnn(self):
+    def test_use_gpudnn(self):
         np_x = np.random.random((3, 8, 8))
         with _test_eager_guard():
             self.assertTrue(in_dygraph_mode())
             x = paddle.to_tensor(np_x, dtype="float64")
-            y = x._use_cudnn(False)
+            y = x._use_gpudnn(False)
             np.testing.assert_array_equal(x.numpy(), y.numpy())
-            y = x._use_cudnn(True)
+            y = x._use_gpudnn(True)
             np.testing.assert_array_equal(x.numpy(), y.numpy())
         self.assertFalse(in_dygraph_mode())
         x = paddle.to_tensor(np_x, dtype="float64")
         with self.assertRaises(AttributeError):
-            x = x._use_cudnn(False)
+            x = x._use_gpudnn(False)
 class EagerParamBaseUsageTestCase(unittest.TestCase):
......
@@ -1690,7 +1690,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
-        x = x._use_cudnn(False)
+        x = x._use_gpudnn(False)
         pool_out = _C_ops.pool2d(
             x,
             pool_size,
@@ -1827,7 +1827,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
             output_size = utils._convert_to_tensor_list(output_size)
     if in_dygraph_mode():
-        x = x._use_cudnn(False)
+        x = x._use_gpudnn(False)
         return _C_ops.pool2d(
             x,
             output_size,
@@ -1972,7 +1972,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
             output_size[2] = in_w
     if in_dygraph_mode():
-        x = x._use_cudnn(False)
+        x = x._use_gpudnn(False)
         return _C_ops.pool3d(
             x,
             output_size,
......
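All three adaptive pooling entry points pass `False` unconditionally, presumably because cuDNN's pooling descriptors have no adaptive mode (an inference; the diff itself does not state the motivation), so dispatch falls back to the plain GPU kernel. Callers are unaffected:

```python
import paddle

x = paddle.randn([1, 3, 32, 32])
# adaptive_avg_pool2d internally flags x with use_gpudnn=False before _C_ops.pool2d.
y = paddle.nn.functional.adaptive_avg_pool2d(x, output_size=[8, 8])
print(y.shape)  # [1, 3, 8, 8]
```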
@@ -91,7 +91,7 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
             if isinstance(out_shape, Variable)
             else out_shape
         )
-        theta = theta._use_cudnn(use_cudnn)
+        theta = theta._use_gpudnn(use_cudnn)
         return _C_ops.affine_grid(theta, _out_shape, align_corners)
     elif in_dynamic_mode():
         _out_shape = (
......