Unverified · Commit 712ccfbf · authored by 石晓伟, committed by GitHub

fix custom ops, test=develop (#39153)

Parent: f07b8cbe
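In summary, this commit adds a cached `PlaceType` to `paddle::Tensor` so that place queries work on uninitialized tensors, and drops the now-redundant place argument from `mutable_data<T>()` calls throughout the custom-op tests.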
@@ -505,6 +505,12 @@ class PADDLE_API Tensor final {
    * in the development of new dygraph. It may be removed in the future.
    */
   std::string name_{""};
+
+  /**
+   * Place type: Return the expected memory location if the Tensor is
+   * uninitialized.
+   */
+  PlaceType place_{PlaceType::kUNK};
 };
 
 }  // namespace experimental
......
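The new `place_` member caches the place a user requested at construction time, so `place()` can answer before any memory has been allocated. A minimal sketch of the idea with simplified stand-in types (everything other than `PlaceType`, `kUNK`, and `place_` is illustrative, not Paddle's real implementation):

```cpp
#include <memory>

enum class PlaceType { kUNK, kCPU, kGPU };

class TensorSketch {
 public:
  explicit TensorSketch(PlaceType place) : place_(place) {}

  PlaceType place() const {
    // No backing storage yet: report the cached request instead of
    // asking an unallocated implementation object.
    return storage_ ? storage_place_ : place_;
  }

  void allocate(PlaceType actual) {  // stand-in for mutable_data<T>()
    storage_ = std::make_shared<int>(0);
    storage_place_ = actual;
  }

 private:
  PlaceType place_{PlaceType::kUNK};  // expected place while uninitialized
  std::shared_ptr<int> storage_;      // stand-in for the DenseTensor impl_
  PlaceType storage_place_{PlaceType::kUNK};
};
```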
@@ -78,7 +78,8 @@ Tensor::Tensor(const PlaceType &place)
           ConvertExtPlaceToInnerPlace(place))),
       std::move(pten::DenseTensorMeta(pten::DataType::UNDEFINED,
                                       framework::make_ddim({}),
-                                      pten::DataLayout::NCHW))))) {}
+                                      pten::DataLayout::NCHW))))),
+      place_{place} {}
 
 Tensor::Tensor(const PlaceType &place, const std::vector<int64_t> &shape)
     : impl_(std::move(std::make_shared<pten::DenseTensor>(
@@ -86,7 +87,8 @@ Tensor::Tensor(const PlaceType &place, const std::vector<int64_t> &shape)
           ConvertExtPlaceToInnerPlace(place))),
       std::move(pten::DenseTensorMeta(pten::DataType::UNDEFINED,
                                       framework::make_ddim(shape),
-                                      pten::DataLayout::NCHW))))) {}
+                                      pten::DataLayout::NCHW))))),
+      place_{place} {}
 
 /* Part 2: Dimension, DataType and DataLayout methods */
@@ -131,17 +133,23 @@ bool Tensor::is_dense_tensor() const {
 /* Part 3: Device and Backend methods */
 
 PlaceType Tensor::place() const {
-  return ConvertInnerPlaceToExtPlace(impl_->place());
+  if (!impl_->initialized()) {
+    return place_;
+  } else {
+    return ConvertInnerPlaceToExtPlace(impl_->place());
+  }
 }
 
-paddle::platform::Place Tensor::inner_place() const { return impl_->place(); }
+paddle::platform::Place Tensor::inner_place() const {
+  return ConvertExtPlaceToInnerPlace(place());
+}
 
 bool Tensor::is_cpu() const {
-  return paddle::platform::is_cpu_place(impl_->place());
+  return paddle::platform::is_cpu_place(inner_place());
 }
 
 bool Tensor::is_cuda() const {
-  return paddle::platform::is_gpu_place(impl_->place());
+  return paddle::platform::is_gpu_place(inner_place());
 }
 
 /* Part 4: Data Access methods */
......
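The net effect of the tensor.cc changes is that place queries are now safe on uninitialized tensors: `place()` falls back to the cached `place_` until the first allocation, `inner_place()` is defined in terms of `place()`, and `is_cpu()`/`is_cuda()` route through `inner_place()` rather than dereferencing `impl_` directly. A hedged usage sketch against the custom-op API shown in this diff:

```cpp
#include <cassert>

#include "paddle/extension.h"  // public custom-op header

void PlaceQuerySketch() {
  // Construct with a place but do not allocate yet.
  paddle::Tensor t(paddle::PlaceType::kCPU, {2, 3});

  // With this fix, both calls work before allocation: place() returns
  // the cached place_, and is_cpu() routes through inner_place().
  assert(t.place() == paddle::PlaceType::kCPU);
  assert(t.is_cpu());

  t.mutable_data<float>();  // first allocation happens here
  assert(t.place() == paddle::PlaceType::kCPU);  // now from impl_->place()
}
```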
@@ -137,9 +137,7 @@ std::vector<paddle::Tensor> AttrTestForward(
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   // Check attrs value
@@ -177,11 +175,10 @@ std::vector<paddle::Tensor> AttrTestBackward(
     const std::vector<std::string>& str_vec_attr) {
   auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
 
-  PD_DISPATCH_FLOATING_TYPES(
-      grad_out.type(), "assign_cpu_kernel", ([&] {
+  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
             grad_out.data<data_t>(),
-            grad_x.mutable_data<data_t>(paddle::PlaceType::kCPU),
+            grad_x.mutable_data<data_t>(),
             grad_out.size());
       }));
@@ -206,9 +203,7 @@ std::vector<paddle::Tensor> ConstAttrTestForward(
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   // Check attrs value
@@ -246,11 +241,10 @@ std::vector<paddle::Tensor> ConstAttrTestBackward(
     const std::vector<std::string>& str_vec_attr) {
   auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
 
-  PD_DISPATCH_FLOATING_TYPES(
-      grad_out.type(), "assign_cpu_kernel", ([&] {
+  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
             grad_out.data<data_t>(),
-            grad_x.mutable_data<data_t>(paddle::PlaceType::kCPU),
+            grad_x.mutable_data<data_t>(),
             grad_out.size());
       }));
......
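All four test kernels change the same way: since the tensor now carries its own place, `mutable_data<data_t>()` no longer needs the explicit `paddle::PlaceType::kCPU` argument. A condensed sketch of the pattern (`AssignForwardSketch` is a hypothetical wrapper; `assign_cpu_kernel` and the dispatch macro are the ones used above):

```cpp
#include <cstdint>
#include <vector>

#include "paddle/extension.h"

template <typename data_t>
void assign_cpu_kernel(const data_t* x_data, data_t* out_data, int64_t n) {
  for (int64_t i = 0; i < n; ++i) out_data[i] = x_data[i];
}

std::vector<paddle::Tensor> AssignForwardSketch(const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
  PD_DISPATCH_FLOATING_TYPES(x.type(), "assign_cpu_kernel", ([&] {
    // The output tensor already knows it lives on kCPU, so no
    // place argument is passed to mutable_data.
    assign_cpu_kernel<data_t>(x.data<data_t>(), out.mutable_data<data_t>(),
                              x.size());
  }));
  return {out};
}
```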
@@ -47,7 +47,7 @@ void ConcatCpuKernel(const std::vector<paddle::Tensor>& ins,
   int64_t out_cols = 0;
   auto ins_cols = GetCols(ins, out_rows, &out_cols);
 
-  auto* out_data = out->mutable_data<data_t>(paddle::PlaceType::kCPU);
+  auto* out_data = out->mutable_data<data_t>();
   int64_t col_idx = 0;
   for (size_t i = 0; i < num; ++i) {
     int64_t col_len = ins_cols[i];
@@ -76,9 +76,7 @@ void SplitCpuKernel(const paddle::Tensor& in,
     int64_t col_idx = 0;
     for (size_t j = 0; j < num; ++j) {
       int64_t col_len = out_cols[j];
-      auto* out_data =
-          outs->at(j).mutable_data<data_t>(paddle::PlaceType::kCPU) +
-          i * col_len;
+      auto* out_data = outs->at(j).mutable_data<data_t>() + i * col_len;
       std::memcpy(out_data, in_data + col_idx, sizeof(data_t) * col_len);
       col_idx += col_len;
     }
......
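For orientation, the pointer arithmetic in `SplitCpuKernel` writes row `i` of the input into output `j` at row offset `i * col_len`; only the allocation call changed. The same row-wise split in plain C++, with hypothetical names and no Paddle types:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Split a row-major [rows x total_cols] buffer into blocks of widths cols[j].
std::vector<std::vector<float>> SplitRows(const float* in, int64_t rows,
                                          const std::vector<int64_t>& cols) {
  int64_t total_cols = 0;
  for (int64_t c : cols) total_cols += c;

  std::vector<std::vector<float>> outs(cols.size());
  for (size_t j = 0; j < cols.size(); ++j) outs[j].resize(rows * cols[j]);

  for (int64_t i = 0; i < rows; ++i) {
    int64_t col_idx = 0;
    for (size_t j = 0; j < cols.size(); ++j) {
      int64_t col_len = cols[j];
      // Mirrors: outs->at(j).mutable_data<data_t>() + i * col_len
      float* out_data = outs[j].data() + i * col_len;
      std::memcpy(out_data, in + i * total_cols + col_idx,
                  sizeof(float) * col_len);
      col_idx += col_len;
    }
  }
  return outs;
}
```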
@@ -76,9 +76,7 @@ std::vector<paddle::Tensor> ConjFunction(const paddle::Tensor& x) {
   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "ConjCPUKernel", ([&] {
         ConjCPUKernel<data_t>(
-            x.data<data_t>(),
-            x.size(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU));
+            x.data<data_t>(), x.size(), out.mutable_data<data_t>());
       }));
 
   return {out};
......
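Only the allocation call changes here as well. For readers unfamiliar with the op: the kernel computes an element-wise complex conjugate. An illustrative version over `std::complex` (Paddle's real `ConjCPUKernel` uses its own complex types):

```cpp
#include <complex>
#include <cstdint>

// Illustrative element-wise conjugate; matches the (input, size, output)
// argument order used by ConjCPUKernel above.
template <typename T>
void conj_kernel(const std::complex<T>* x, int64_t n, std::complex<T>* out) {
  for (int64_t i = 0; i < n; ++i) out[i] = std::conj(x[i]);
}
```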
@@ -32,9 +32,7 @@ std::vector<paddle::Tensor> DispatchTestInterger(const paddle::Tensor& x) {
   PD_DISPATCH_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
@@ -52,9 +50,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndInteger(
   PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
@@ -71,9 +67,7 @@ std::vector<paddle::Tensor> DispatchTestComplex(const paddle::Tensor& x) {
   PD_DISPATCH_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
@@ -91,9 +85,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndComplex(
   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
@@ -111,9 +103,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndIntegerAndComplex(
   PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
@@ -130,9 +120,7 @@ std::vector<paddle::Tensor> DispatchTestFloatAndHalf(const paddle::Tensor& x) {
   PD_DISPATCH_FLOATING_AND_HALF_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
         assign_cpu_kernel<data_t>(
-            x.data<data_t>(),
-            out.mutable_data<data_t>(paddle::PlaceType::kCPU),
-            x.size());
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
       }));
 
   return {out};
......
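The six dispatch-test hunks are all the same simplification. Each `PD_DISPATCH_*` macro switches on the runtime dtype and aliases the matching C++ type to `data_t` inside the parenthesized lambda. A trimmed-down sketch of how such a dispatch macro can be built (illustrative only, not Paddle's actual definition):

```cpp
#include <stdexcept>
#include <string>

enum class DataType { FLOAT32, FLOAT64 };  // trimmed-down dtype enum

// Each case aliases data_t to the concrete type, then invokes the
// user-supplied parenthesized lambda, mirroring the usage above:
//   MY_DISPATCH_FLOATING_TYPES(dtype, "assign_cpu_kernel", ([&] {
//     assign_cpu_kernel<data_t>(src, dst, n);
//   }));
#define MY_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...)        \
  switch (TYPE) {                                          \
    case DataType::FLOAT32: {                              \
      using data_t = float;                                \
      __VA_ARGS__();                                       \
      break;                                               \
    }                                                      \
    case DataType::FLOAT64: {                              \
      using data_t = double;                               \
      __VA_ARGS__();                                       \
      break;                                               \
    }                                                      \
    default:                                               \
      throw std::runtime_error(std::string(NAME) +         \
                               ": unsupported data type"); \
  }
```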