Unverified commit 41f15537, authored by H HongyuJia, committed by GitHub

rename use_cudnn to use_gpudnn in phi (#48443)
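A minimal sketch of the renamed dygraph-only hook after this change, mirroring the updated unit test further down (the tensor shape and dtype are illustrative): _use_gpudnn returns a tensor that shares data with the original but carries a DenseTensorMeta whose use_gpudnn flag is flipped, so kernels dispatched on it skip (or prefer) the GPUDNN backend.

import numpy as np
import paddle

# Requires eager (dygraph) mode; _use_gpudnn is only effective for DenseTensor.
x = paddle.to_tensor(np.random.random((3, 8, 8)), dtype="float64")
y = x._use_gpudnn(False)  # ops consuming y will not pick GPUDNN (cuDNN) kernels
np.testing.assert_array_equal(x.numpy(), y.numpy())  # data is shared; only the meta flag differs
y = x._use_gpudnn(True)   # re-enable gpudnn kernel selection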

Parent a559a664
@@ -1450,28 +1450,28 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
-static PyObject* tensor__use_cudnn(TensorObject* self,
-PyObject* args,
-PyObject* kwargs) {
+static PyObject* tensor__use_gpudnn(TensorObject* self,
+PyObject* args,
+PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE(self->tensor.defined() && self->tensor.is_dense_tensor(),
paddle::platform::errors::Fatal(
-"function _use_cudnn is only effective for DenseTensor"));
+"function _use_gpudnn is only effective for DenseTensor"));
-bool use_cudnn = pybind::CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
+bool use_gpudnn = pybind::CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
-// Set the same use_cudnn attribute, return directly
+// Set the same use_gpudnn attribute, return directly
phi::DenseTensor* dense_tensor =
static_cast<phi::DenseTensor*>(self->tensor.impl().get());
phi::DenseTensorMeta* dense_tensor_meta =
phi::DenseTensorUtils::GetMutableMeta(dense_tensor);
-if (use_cudnn == dense_tensor_meta->use_cudnn) {
+if (use_gpudnn == dense_tensor_meta->use_gpudnn) {
return ToPyObject(self->tensor);
}
-// Share all other members of Tensor except use_cudnn
+// Share all other members of Tensor except use_gpudnn
phi::DenseTensorMeta target_dense_meta = *dense_tensor_meta;
-target_dense_meta.use_cudnn = use_cudnn;
+target_dense_meta.use_gpudnn = use_gpudnn;
phi::DenseTensor target_dense_tensor;
target_dense_tensor.ShareDataWith(*dense_tensor);
target_dense_tensor.set_meta(target_dense_meta);
@@ -1481,7 +1481,7 @@ static PyObject* tensor__use_cudnn(TensorObject* self,
self->tensor.name());
target_tensor.set_autograd_meta(self->tensor.mutable_autograd_meta());
VLOG(4) << "Tensor: " << target_tensor.name()
-<< " set use_cudnn = " << use_cudnn;
+<< " set use_gpudnn = " << use_gpudnn;
return ToPyObject(target_tensor);
EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -2053,8 +2053,8 @@ PyMethodDef variable_methods[] = {
(PyCFunction)(void (*)(void))tensor__copy_gradient_from,
METH_VARARGS | METH_KEYWORDS,
NULL},
-{"_tensor_use_cudnn",
-(PyCFunction)(void (*)(void))tensor__use_cudnn,
+{"_tensor_use_gpudnn",
+(PyCFunction)(void (*)(void))tensor__use_gpudnn,
METH_VARARGS | METH_KEYWORDS,
NULL},
/** the methods to adapt old dygraph, will be removed in the future **/
......
@@ -57,7 +57,7 @@ BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
phi::Backend backend_key = phi::TransToPhiBackend(t.place());
BackendSet backend_set(backend_key);
if (backend_key == Backend::GPU && phi::DenseTensor::classof(&t) &&
-static_cast<const phi::DenseTensor&>(t).meta().use_cudnn) {
+static_cast<const phi::DenseTensor&>(t).meta().use_gpudnn) {
backend_set = backend_set | BackendSet(Backend::GPUDNN);
}
return backend_set;
@@ -126,7 +126,7 @@ Backend ParseBackend(const Tensor& tensor) {
Backend backend_key = phi::TransToPhiBackend(tensor.place());
if (backend_key == Backend::GPU &&
phi::DenseTensor::classof(tensor.impl().get()) &&
-static_cast<phi::DenseTensor*>(tensor.impl().get())->meta().use_cudnn) {
+static_cast<phi::DenseTensor*>(tensor.impl().get())->meta().use_gpudnn) {
return Backend::GPUDNN;
}
return backend_key;
......
@@ -90,7 +90,7 @@ struct ArgsIterator {
struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
KernelKeySet key_set;
-bool disable_cudnn = false;
+bool disable_gpudnn = false;
// this dtype_set is used for cache multi-inputs dtype and used for
// data_promote
DataTypeSet dtype_set{DataType::UNDEFINED};
@@ -101,9 +101,9 @@ struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
// assign Backend
BackendSet tensor_backend_set = detail::GetTensorBackendSet(tensor);
key_set.backend_set = key_set.backend_set | tensor_backend_set;
-// tensor's attribute use_cudnn=False, explicitly disable cudnn kernel
-if (tensor_backend_set == BackendSet(Backend::GPU) || disable_cudnn) {
-disable_cudnn = true;
+// tensor's attribute use_gpudnn=False, explicitly disable gpudnn kernel
+if (tensor_backend_set == BackendSet(Backend::GPU) || disable_gpudnn) {
+disable_gpudnn = true;
key_set.backend_set = key_set.backend_set - BackendSet(Backend::GPUDNN);
}
// assign DataLayout
......
@@ -200,7 +200,7 @@ void DenseTensor::set_meta(const DenseTensorMeta& meta) {
meta_.layout = meta.layout;
meta_.lod = meta.lod;
meta_.offset = meta.offset;
-meta_.use_cudnn = meta.use_cudnn;
+meta_.use_gpudnn = meta.use_gpudnn;
}
/* @jim19930609: This interface will be further modified until we finalized the
......
@@ -357,7 +357,7 @@ DenseTensor& DenseTensor::ShareDataWith(const DenseTensor& src) {
meta_.dtype = src.meta_.dtype;
meta_.layout = src.meta_.layout;
meta_.offset = src.meta_.offset;
-meta_.use_cudnn = src.meta_.use_cudnn;
+meta_.use_gpudnn = src.meta_.use_gpudnn;
storage_properties_ =
std::move(CopyStorageProperties(src.storage_properties_));
#ifdef PADDLE_WITH_MKLDNN
......
@@ -16,11 +16,11 @@ limitations under the License. */
namespace phi {
-DenseTensorMeta::DenseTensorMeta() { use_cudnn = true; }
+DenseTensorMeta::DenseTensorMeta() { use_gpudnn = true; }
DenseTensorMeta::DenseTensorMeta(DataType dtype, const DDim& dims)
: dims(dims), dtype(dtype) {
-use_cudnn = true;
+use_gpudnn = true;
}
DenseTensorMeta::DenseTensorMeta(DataType dtype,
@@ -28,7 +28,7 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
DataLayout layout,
size_t offset)
: dims(dims), dtype(dtype), layout(layout), offset(offset) {
-use_cudnn = true;
+use_gpudnn = true;
}
DenseTensorMeta::DenseTensorMeta(DataType dtype,
@@ -37,7 +37,7 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
const LoD& lod,
size_t offset)
: dims(dims), dtype(dtype), layout(layout), lod(lod), offset(offset) {
-use_cudnn = true;
+use_gpudnn = true;
}
bool DenseTensorMeta::valid() const noexcept {
......
@@ -65,9 +65,9 @@ struct DenseTensorMeta {
bool valid() const noexcept;
bool is_scalar{false};
-/// \brief Determine whether using CuDNN speed-up library in the new dygraph.
+/// \brief Determine whether using gpudnn speed-up library in the new dygraph.
/// It maybe also support MKLDNN library in the near future.
-bool use_cudnn{true};
+bool use_gpudnn{true};
DDim dims;
DataType dtype{DataType::UNDEFINED};
DataLayout layout{DataLayout::NCHW};
@@ -76,7 +76,7 @@
};
inline bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
-return (lhs.is_scalar == rhs.is_scalar) && lhs.use_cudnn == rhs.use_cudnn &&
+return (lhs.is_scalar == rhs.is_scalar) && lhs.use_gpudnn == rhs.use_gpudnn &&
(lhs.dims == rhs.dims) && (lhs.dtype == rhs.dtype) &&
(lhs.layout == rhs.layout) && (lhs.lod == rhs.lod) &&
(lhs.offset == rhs.offset);
......
@@ -671,7 +671,7 @@ class Pool2D(layers.Layer):
def forward(self, input):
if _non_static_mode():
if not self._use_mkldnn and in_dygraph_mode():
-input = input._use_cudnn(self._use_cudnn)
+input = input._use_gpudnn(self._use_cudnn)
return _C_ops.pool2d(
input,
self._pool_size,
......
@@ -886,8 +886,8 @@ def monkey_patch_varbase():
self.get_tensor()._clear()
@framework.dygraph_only
-def _use_cudnn(self, use_cudnn=True):
-return self._tensor_use_cudnn(use_cudnn)
+def _use_gpudnn(self, use_gpudnn=True):
+return self._tensor_use_gpudnn(use_gpudnn)
@framework.dygraph_only
def _uva(self, device_id=0):
@@ -1073,7 +1073,7 @@ def monkey_patch_varbase():
setattr(core.eager.Tensor, "_uva", _uva)
setattr(core.eager.Tensor, "_clear_data", _clear_data)
setattr(core.eager.Tensor, "__hash__", __hash__)
-setattr(core.eager.Tensor, "_use_cudnn", _use_cudnn)
+setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn)
else:
setattr(core.VarBase, "__name__", "Tensor")
setattr(core.VarBase, "grad", grad)
......
@@ -1852,7 +1852,7 @@ def pool2d(
pool_padding = update_padding(pool_padding, data_format)
if in_dygraph_mode():
-input = input._use_cudnn(use_cudnn)
+input = input._use_gpudnn(use_cudnn)
return _C_ops.pool2d(
input,
pool_size,
......
@@ -899,20 +899,20 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
x._clear()
self.assertFalse(x._is_initialized())
-def test_use_cudnn(self):
+def test_use_gpudnn(self):
np_x = np.random.random((3, 8, 8))
with _test_eager_guard():
self.assertTrue(in_dygraph_mode())
x = paddle.to_tensor(np_x, dtype="float64")
-y = x._use_cudnn(False)
+y = x._use_gpudnn(False)
np.testing.assert_array_equal(x.numpy(), y.numpy())
-y = x._use_cudnn(True)
+y = x._use_gpudnn(True)
np.testing.assert_array_equal(x.numpy(), y.numpy())
self.assertFalse(in_dygraph_mode())
x = paddle.to_tensor(np_x, dtype="float64")
with self.assertRaises(AttributeError):
-x = x._use_cudnn(False)
+x = x._use_gpudnn(False)
class EagerParamBaseUsageTestCase(unittest.TestCase):
......
@@ -1690,7 +1690,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
x = unsqueeze(x, [2])
if in_dygraph_mode():
-x = x._use_cudnn(False)
+x = x._use_gpudnn(False)
pool_out = _C_ops.pool2d(
x,
pool_size,
@@ -1827,7 +1827,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
output_size = utils._convert_to_tensor_list(output_size)
if in_dygraph_mode():
-x = x._use_cudnn(False)
+x = x._use_gpudnn(False)
return _C_ops.pool2d(
x,
output_size,
@@ -1972,7 +1972,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
output_size[2] = in_w
if in_dygraph_mode():
-x = x._use_cudnn(False)
+x = x._use_gpudnn(False)
return _C_ops.pool3d(
x,
output_size,
......
@@ -91,7 +91,7 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
if isinstance(out_shape, Variable)
else out_shape
)
-theta = theta._use_cudnn(use_cudnn)
+theta = theta._use_gpudnn(use_cudnn)
return _C_ops.affine_grid(theta, _out_shape, align_corners)
elif in_dynamic_mode():
_out_shape = (
......