Unverified commit de2a3942, authored by Chen Weihang, committed by GitHub

Remove inner_place usage (#41768)

Parent 192f6f85
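The change is mechanical: every call site that read the tensor's device through the deprecated inner_place() accessor now calls place(), and the redundant inner_place() declaration and definition are deleted from the public Tensor API. A minimal sketch of the call-site migration (illustrative only, not part of the diff below; the function name and include path are assumed, and "t" stands for any already-constructed paddle::experimental::Tensor):

    // Call-site migration sketch (illustrative, not from this commit).
    #include "paddle/phi/api/include/tensor.h"  // assumed header path

    void migrate_example(const paddle::experimental::Tensor& t) {
      // Before this commit (deprecated accessor, removed here):
      //   auto dev = t.inner_place();
      // After this commit (single remaining accessor; both forwarded to
      // impl_->place(), so the returned Place is the same):
      auto dev = t.place();
      (void)dev;
    }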
......@@ -21,7 +21,7 @@ namespace egr {
static inline bool NeedCast(const paddle::experimental::Tensor& tensor,
const paddle::experimental::DataType& dst_dtype) {
- auto place = tensor.inner_place();
+ auto place = tensor.place();
auto data_type = tensor.dtype();
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
......
......@@ -20,7 +20,7 @@ namespace egr {
static inline bool NeedCast(const paddle::experimental::Tensor& tensor,
const paddle::experimental::DataType& dst_dtype) {
- auto place = tensor.inner_place();
+ auto place = tensor.place();
auto data_type = tensor.dtype();
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
......
......@@ -151,7 +151,7 @@ void GradNodeBase::SetGradInMeta(const paddle::experimental::Tensor& fwd_out,
"which is illegal."));
meta.SetTensorMeta(dense_tensor->meta());
- meta.SetPlace(fwd_out.inner_place());
+ meta.SetPlace(fwd_out.place());
if (paddle::framework::IsComplexType(
paddle::framework::TransToProtoVarType(dense_tensor->type()))) {
......@@ -210,7 +210,7 @@ void GradNodeBase::SetGradInMeta(
"with phi::DataType::UNDEFINED,"
"which is illegal."));
meta.SetTensorMeta(dense_tensor->meta());
- meta.SetPlace(fwd_out_tensor.inner_place());
+ meta.SetPlace(fwd_out_tensor.place());
if (paddle::framework::IsComplexType(
paddle::framework::TransToProtoVarType(dense_tensor->type()))) {
......@@ -256,7 +256,7 @@ void GradNodeBase::SetGradOutMeta(const paddle::experimental::Tensor& fwd_in,
"with phi::DataType::UNDEFINED,"
"which is illegal."));
meta.SetTensorMeta(dense_tensor->meta());
- meta.SetPlace(fwd_in.inner_place());
+ meta.SetPlace(fwd_in.place());
}
} else {
VLOG(6) << "Unable to initialize the DenseTensorMeta of GradSlotMeta with "
......@@ -301,7 +301,7 @@ void GradNodeBase::SetGradOutMeta(
"phi::DataType::UNDEFINED,"
"which is illegal."));
meta.SetTensorMeta(dense_tensor->meta());
- meta.SetPlace(fwd_in_tensor.inner_place());
+ meta.SetPlace(fwd_in_tensor.place());
}
} else {
VLOG(6) << "Unable to initialize the DenseTensorMeta of GradSlotMeta "
......
......@@ -317,11 +317,11 @@ inline void CheckTensor(const paddle::experimental::Tensor& pre,
paddle::framework::DataType2String(pre.dtype()),
paddle::framework::DataType2String(post.dtype())));
PADDLE_ENFORCE_EQ(
- pre.inner_place(), post.inner_place(),
+ pre.place(), post.place(),
paddle::platform::errors::PermissionDenied(
"The place of tensor before(%s) and after(%s) "
"hook are not consistent",
- pre.inner_place().DebugString(), post.inner_place().DebugString()));
+ pre.place().DebugString(), post.place().DebugString()));
}
}
......
......@@ -53,7 +53,7 @@ void GradTensorHolder::CopyValueFromTensor(
paddle::experimental::Tensor& buffer_tensor = buffer_[slot_id][rank];
if ((!buffer_tensor.defined() || !buffer_tensor.initialized())) {
// Perform deep copy here
- buffer_tensor.copy_(t, t.inner_place(), false);
+ buffer_tensor.copy_(t, t.place(), false);
buffer_tensor.set_autograd_meta(t.mutable_autograd_meta());
} else {
......@@ -66,7 +66,7 @@ void GradTensorHolder::CopyValueFromTensor(
if (t.defined()) {
// Fill 1.0, use full to support complex, one_like don't support it.
buffer_[slot_id][rank] =
- paddle::experimental::full(t.shape(), 1, t.dtype(), t.inner_place());
+ paddle::experimental::full(t.shape(), 1, t.dtype(), t.place());
}
}
}
......
......@@ -62,7 +62,7 @@ class GradNodePyLayer : public GradNodeBase {
} else {
forward_outputs_meta_[i].emplace_back();
}
- forward_outputs_place_[i].emplace_back(tensor->inner_place());
+ forward_outputs_place_[i].emplace_back(tensor->place());
}
}
}
......
......@@ -96,7 +96,7 @@ TEST(Tensor, MemberFunction) {
CHECK_EQ(et3.dims(), expected_dim);
CHECK_EQ(et3.type(), paddle::experimental::DataType::FLOAT32);
CHECK_EQ(et3.layout(), paddle::experimental::DataLayout::NCHW);
- CHECK(paddle::platform::is_cpu_place(et3.inner_place()));
+ CHECK(paddle::platform::is_cpu_place(et3.place()));
VLOG(6) << "Get impl";
auto* dt3_ptr =
std::dynamic_pointer_cast<phi::DenseTensor>(et3.impl())->data<float>();
......
......@@ -137,7 +137,7 @@ void InitTensorWithTensor(TensorObject* self,
const paddle::platform::Place& place,
const std::string& name) {
self->tensor.set_name(name);
- if (place == src.inner_place()) {
+ if (place == src.place()) {
auto impl = std::static_pointer_cast<phi::DenseTensor>(src.impl());
self->tensor.set_impl(impl);
VLOG(4) << "Same place, do ShareDataWith";
......
......@@ -554,32 +554,32 @@ static PyObject* eager_api_async_read(PyObject* self, PyObject* args,
src.is_gpu_pinned(), true,
platform::errors::InvalidArgument("Required `src` device should be "
"CUDAPinnedPlace, but received %d.",
- src.inner_place()));
+ src.place()));
PADDLE_ENFORCE_EQ(
dst.is_gpu(), true,
platform::errors::InvalidArgument(
"Required `dst` device should be CUDAPlace, but received %d.",
- dst.inner_place()));
+ dst.place()));
PADDLE_ENFORCE_EQ(
index.is_cpu(), true,
platform::errors::InvalidArgument(
"Required `index` device should be CPUPlace, but received %d.",
- index.inner_place()));
+ index.place()));
PADDLE_ENFORCE_EQ(buffer.is_gpu_pinned(), true,
platform::errors::InvalidArgument(
"Required `buffer` device should be CUDAPinnedPlace, "
"but received %d.",
- buffer.inner_place()));
+ buffer.place()));
PADDLE_ENFORCE_EQ(
offset.is_cpu(), true,
platform::errors::InvalidArgument(
"Required `offset` device should be CPUPlace, but received %d.",
- offset.inner_place()));
+ offset.place()));
PADDLE_ENFORCE_EQ(
count.is_cpu(), true,
platform::errors::InvalidArgument(
"Required `count` device should be CPUPlace, but received %d.",
- count.inner_place()));
+ count.place()));
auto& src_tensor = src;
auto* dst_tensor = &dst;
......@@ -701,22 +701,22 @@ static PyObject* eager_api_async_write(PyObject* self, PyObject* args,
src.is_gpu(), true,
platform::errors::InvalidArgument(
"Required `src` device should be CUDAPlace, but received %d. ",
- src.inner_place()));
+ src.place()));
PADDLE_ENFORCE_EQ(dst.is_gpu_pinned(), true,
platform::errors::InvalidArgument(
"Required `dst` device should be CUDAPinnedPlace, "
"but received %d. ",
- dst.inner_place()));
+ dst.place()));
PADDLE_ENFORCE_EQ(
offset.is_cpu(), true,
platform::errors::InvalidArgument("Required `offset` device should "
"be CPUPlace, but received %d. ",
- offset.inner_place()));
+ offset.place()));
PADDLE_ENFORCE_EQ(
count.is_cpu(), true,
platform::errors::InvalidArgument(
"Required `count` device should be CPUPlace, but received %d. ",
- count.inner_place()));
+ count.place()));
// TODO(daisiming): In future, add index as arguments following
// async_read.
......
......@@ -342,11 +342,11 @@ static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
->SetPersistable(
egr::EagerUtils::autograd_meta(&(src_tensor))->Persistable());
if (src_tensor.initialized()) {
- self->tensor.copy_(src_tensor, src_tensor.inner_place(), blocking);
+ self->tensor.copy_(src_tensor, src_tensor.place(), blocking);
}
} else {
if (src_tensor.initialized()) {
- self->tensor.copy_(src_tensor, self->tensor.inner_place(), blocking);
+ self->tensor.copy_(src_tensor, self->tensor.place(), blocking);
}
}
......@@ -934,7 +934,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
} else {
SetTensorFromPyArray(
static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
- value, value_tensor_tmp.inner_place(), false);
+ value, value_tensor_tmp.place(), false);
}
value_tensor = value_tensor_tmp;
......@@ -1018,7 +1018,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
platform::Place(platform::CPUPlace()), false);
#endif
} else {
- SetTensorFromPyArray(self_tensor, self_numpy, self->tensor.inner_place(),
+ SetTensorFromPyArray(self_tensor, self_numpy, self->tensor.place(),
false);
}
}
......@@ -1367,7 +1367,7 @@ static PyObject* tensor_method__share_memory(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
#ifndef _WIN32
- PADDLE_ENFORCE_EQ(platform::is_cpu_place(self->tensor.inner_place()), true,
+ PADDLE_ENFORCE_EQ(platform::is_cpu_place(self->tensor.place()), true,
platform::errors::InvalidArgument(
"Sharing memory only support CPU Tensor currently"));
// 1. get LoDTensor
......@@ -1419,7 +1419,7 @@ static PyObject* tensor_method__uva(TensorObject* self, PyObject* args,
platform::errors::InvalidArgument(
"Unified virtual addressing only support "
"DenseTensor currently."));
- PADDLE_ENFORCE_EQ(platform::is_cpu_place(self->tensor.inner_place()), true,
+ PADDLE_ENFORCE_EQ(platform::is_cpu_place(self->tensor.place()), true,
platform::errors::InvalidArgument(
"Unified virtual addressing only support "
"CPU Tensor currently."));
......
......@@ -108,7 +108,7 @@ int tensor_properties_set_grad(TensorObject* self, PyObject* value,
"Detected NULL grad"
"Please check if you have manually cleared"
"the grad inside autograd_meta"));
- grad->copy_(src, self->tensor.inner_place(), true);
+ grad->copy_(src, self->tensor.place(), true);
return 0;
EAGER_CATCH_AND_THROW_RETURN_NEG
}
......@@ -160,14 +160,14 @@ PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_TRY
- return ToPyObject(self->tensor.inner_place());
+ return ToPyObject(self->tensor.place());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
EAGER_TRY
std::stringstream ostr;
- ostr << self->tensor.inner_place();
+ ostr << self->tensor.place();
return ToPyObject(ostr.str());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
......
......@@ -249,21 +249,11 @@ class PADDLE_API Tensor final {
/**
* @brief Return the place (device) of Tensor.
- * This is a deprecated method and may be removed in the future!
*
* @return Place
*/
Place place() const;
- /**
-  * @brief Return the place (device) of Tensor.
-  *
-  * This is a deprecated method and may be removed in the future!!!
-  *
-  * @return Place
-  */
- Place inner_place() const;
/**
* @brief Determine whether the tensor device is CPU
*
......
......@@ -126,7 +126,7 @@ Backend ParseBackend(const Place& place) {
return phi::TransToPhiBackend(place);
}
Backend ParseBackend(const Tensor& tensor) {
- return phi::TransToPhiBackend(tensor.inner_place());
+ return phi::TransToPhiBackend(tensor.place());
}
Backend ParseBackendWithInputOrder(const Place& place, const Tensor& tensor) {
......
......@@ -163,25 +163,12 @@ Place Tensor::place() const {
return impl_->place();
}
- Place Tensor::inner_place() const {
-   PADDLE_ENFORCE_NOT_NULL(
-       impl_,
-       phi::errors::PermissionDenied(
-           "Null pointer error, the impl_ of Tensor should not be "
-           "Null when calling Tensor::inner_place()."));
-   return impl_->place();
- }
- bool Tensor::is_cpu() const {
-   return paddle::platform::is_cpu_place(inner_place());
- }
+ bool Tensor::is_cpu() const { return paddle::platform::is_cpu_place(place()); }
- bool Tensor::is_gpu() const {
-   return paddle::platform::is_gpu_place(inner_place());
- }
+ bool Tensor::is_gpu() const { return paddle::platform::is_gpu_place(place()); }
bool Tensor::is_gpu_pinned() const {
- return paddle::platform::is_cuda_pinned_place(inner_place());
+ return paddle::platform::is_cuda_pinned_place(place());
}
/* Part 4: Data Access methods */
......
......@@ -97,16 +97,15 @@ void Tensor::copy_(const Tensor &src,
name(),
src.name()));
PADDLE_ENFORCE_EQ(target_place,
- inner_place(),
+ place(),
phi::errors::PreconditionNotMet(
"Place is different of dst tensor and args %s, which "
"current tensor holds %s "
"Copy cannot be performed!",
target_place,
- inner_place()));
- kernel_key_set.backend_set =
-     kernel_key_set.backend_set |
-     BackendSet(phi::TransToPhiBackend(inner_place()));
+ place()));
+ kernel_key_set.backend_set = kernel_key_set.backend_set |
+                              BackendSet(phi::TransToPhiBackend(place()));
} else {
// Deep Copy AutoGrad info from src to self.
*autograd_meta_ = *(src.autograd_meta_);
......