Unverified commit 712ccfbf, authored by 石晓伟, committed by GitHub

fix custom ops, test=develop (#39153)

Parent f07b8cbe
......
@@ -505,6 +505,12 @@ class PADDLE_API Tensor final {
    * in the development of new dygraph. It may be removed in the future.
    */
   std::string name_{""};
+
+  /**
+   * Place type: Return the expected memory location if the Tensor is
+   * uninitialized.
+   */
+  PlaceType place_{PlaceType::kUNK};
 };
 
 }  // namespace experimental
......
......
@@ -78,7 +78,8 @@ Tensor::Tensor(const PlaceType &place)
               ConvertExtPlaceToInnerPlace(place))),
           std::move(pten::DenseTensorMeta(pten::DataType::UNDEFINED,
                                           framework::make_ddim({}),
-                                          pten::DataLayout::NCHW))))) {}
+                                          pten::DataLayout::NCHW))))),
+      place_{place} {}
 
 Tensor::Tensor(const PlaceType &place, const std::vector<int64_t> &shape)
     : impl_(std::move(std::make_shared<pten::DenseTensor>(
@@ -86,7 +87,8 @@ Tensor::Tensor(const PlaceType &place, const std::vector<int64_t> &shape)
               ConvertExtPlaceToInnerPlace(place))),
           std::move(pten::DenseTensorMeta(pten::DataType::UNDEFINED,
                                           framework::make_ddim(shape),
-                                          pten::DataLayout::NCHW))))) {}
+                                          pten::DataLayout::NCHW))))),
+      place_{place} {}
 
 /* Part 2: Dimension, DataType and DataLayout methods */
......
@@ -131,17 +133,23 @@ bool Tensor::is_dense_tensor() const {
 /* Part 3: Device and Backend methods */
 
 PlaceType Tensor::place() const {
-  return ConvertInnerPlaceToExtPlace(impl_->place());
+  if (!impl_->initialized()) {
+    return place_;
+  } else {
+    return ConvertInnerPlaceToExtPlace(impl_->place());
+  }
 }
 
-paddle::platform::Place Tensor::inner_place() const { return impl_->place(); }
+paddle::platform::Place Tensor::inner_place() const {
+  return ConvertExtPlaceToInnerPlace(place());
+}
 
 bool Tensor::is_cpu() const {
-  return paddle::platform::is_cpu_place(impl_->place());
+  return paddle::platform::is_cpu_place(inner_place());
 }
 
 bool Tensor::is_cuda() const {
-  return paddle::platform::is_gpu_place(impl_->place());
+  return paddle::platform::is_gpu_place(inner_place());
 }
 
 /* Part 4: Data Access methods */
......
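Taken together, the changes above make the device-query methods safe on a Tensor whose storage has not been allocated yet: place() falls back to the cached place_, and inner_place(), is_cpu(), and is_cuda() are rewritten on top of it. A minimal usage sketch, assuming the custom-op header paddle/extension.h and its PD_CHECK macro (PlaceQueryDemo is a hypothetical helper, not part of this commit):

#include "paddle/extension.h"

void PlaceQueryDemo() {
  // A tensor constructed with a target place, but with no storage yet.
  paddle::Tensor t(paddle::PlaceType::kCPU, {2, 3});

  // Before this commit these queries went through impl_->place() even
  // when the implementation was uninitialized; now they use place_.
  PD_CHECK(t.place() == paddle::PlaceType::kCPU, "expected a CPU tensor");
  PD_CHECK(t.is_cpu(), "expected a CPU tensor");

  // Allocation happens later, on the place cached at construction; this
  // is what lets the tests below drop the explicit PlaceType argument.
  float* data = t.mutable_data<float>();
  data[0] = 1.0f;
}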
......
@@ -137,9 +137,7 @@ std::vector<paddle::Tensor> AttrTestForward(
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   // Check attrs value
......
@@ -177,13 +175,12 @@ std::vector<paddle::Tensor> AttrTestBackward(
     const std::vector<std::string>& str_vec_attr) {
   auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
 
-  PD_DISPATCH_FLOATING_TYPES(
-      grad_out.type(), "assign_cpu_kernel", ([&] {
-        assign_cpu_kernel<data_t>(
-            grad_out.data<data_t>(),
-            grad_x.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            grad_out.size());
-      }));
+  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
+                               assign_cpu_kernel<data_t>(
+                                   grad_out.data<data_t>(),
+                                   grad_x.mutable_data<data_t>(),
+                                   grad_out.size());
+                             }));
 
   CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);
......
@@ -206,9 +203,7 @@ std::vector<paddle::Tensor> ConstAttrTestForward(
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   // Check attrs value
......
@@ -246,13 +241,12 @@ std::vector<paddle::Tensor> ConstAttrTestBackward(
     const std::vector<std::string>& str_vec_attr) {
   auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
 
-  PD_DISPATCH_FLOATING_TYPES(
-      grad_out.type(), "assign_cpu_kernel", ([&] {
-        assign_cpu_kernel<data_t>(
-            grad_out.data<data_t>(),
-            grad_x.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            grad_out.size());
-      }));
+  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
+                               assign_cpu_kernel<data_t>(
+                                   grad_out.data<data_t>(),
+                                   grad_x.mutable_data<data_t>(),
+                                   grad_out.size());
+                             }));
 
   CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);
......
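The test-side change is mechanical: every mutable_data<data_t>(paddle::PlaceType::kCPU) call becomes a no-argument mutable_data<data_t>(), because the tensor already knows the place it was constructed with. A condensed sketch of the resulting custom-op pattern (AssignForward is a hypothetical example mirroring the test kernels above, not part of the commit):

#include "paddle/extension.h"

template <typename data_t>
void assign_cpu_kernel(const data_t* x_data, data_t* out_data,
                       int64_t numel) {
  // Plain element-wise copy, as in the test kernels.
  for (int64_t i = 0; i < numel; ++i) {
    out_data[i] = x_data[i];
  }
}

std::vector<paddle::Tensor> AssignForward(const paddle::Tensor& x) {
  // The place given here is the one the no-argument mutable_data() uses.
  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));
  return {out};
}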
......
@@ -47,7 +47,7 @@ void ConcatCpuKernel(const std::vector<paddle::Tensor>& ins,
   int64_t out_cols = 0;
   auto ins_cols = GetCols(ins, out_rows, &out_cols);
 
-  auto* out_data = out->mutable_data<data_t>(paddle::PlaceType::kCPU);
+  auto* out_data = out->mutable_data<data_t>();
   int64_t col_idx = 0;
   for (size_t i = 0; i < num; ++i) {
     int64_t col_len = ins_cols[i];
......
@@ -76,9 +76,7 @@ void SplitCpuKernel(const paddle::Tensor& in,
     int64_t col_idx = 0;
     for (size_t j = 0; j < num; ++j) {
       int64_t col_len = out_cols[j];
-      auto* out_data =
-          outs->at(j).mutable_data<data_t>(paddle::PlaceType::kCPU) +
-          i * col_len;
+      auto* out_data = outs->at(j).mutable_data<data_t>() + i * col_len;
       std::memcpy(out_data, in_data + col_idx, sizeof(data_t) * col_len);
       col_idx += col_len;
     }
......
......
@@ -76,9 +76,7 @@ std::vector<paddle::Tensor> ConjFunction(const paddle::Tensor& x) {
   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "ConjCPUKernel", ([&] {
         ConjCPUKernel<data_t>(
-            x.data<data_t>(),
-            x.size(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU));
+            x.data<data_t>(), x.size(), out.mutable_data<data_t>());
       }));
 
   return {out};
......
......
@@ -32,9 +32,7 @@ std::vector<paddle::Tensor> DispatchTestInterger(const paddle::Tensor& x) {
   PD_DISPATCH_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
@@ -52,9 +50,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndInteger(
   PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
@@ -71,9 +67,7 @@ std::vector<paddle::Tensor> DispatchTestComplex(const paddle::Tensor& x) {
   PD_DISPATCH_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
@@ -91,9 +85,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndComplex(
   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
@@ -111,9 +103,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndIntegerAndComplex(
   PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
@@ -130,9 +120,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndHalf(const paddle::Tensor& x) {
   PD_DISPATCH_FLOATING_AND_HALF_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
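For context, the PD_DISPATCH_* macros used throughout these tests switch on the runtime DataType and run the supplied lambda with data_t aliased to the matching C++ type, which is why one templated kernel serves every dtype. A simplified stand-in showing the mechanism (an illustration only; Paddle's real macros cover more types and use internal case helpers):

// MY_DISPATCH_FLOATING_TYPES is a hypothetical, reduced version of
// PD_DISPATCH_FLOATING_TYPES: bind data_t, then invoke the lambda.
#define MY_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...)  \
  switch (TYPE) {                                    \
    case paddle::DataType::FLOAT32: {                \
      using data_t = float;                          \
      __VA_ARGS__();                                 \
      break;                                         \
    }                                                \
    case paddle::DataType::FLOAT64: {                \
      using data_t = double;                         \
      __VA_ARGS__();                                 \
      break;                                         \
    }                                                \
    default:                                         \
      PD_THROW("Unsupported data type.");            \
  }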