Unverified commit 88ec08a7, authored by From00, committed by GitHub

Move Pool OPs to phi (#40208)

* Move Pool OPs to phi

* Fix CI error

* Fix conflicts
Parent: 5ab2cec5
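
The hunks below repeat one migration pattern: the fluid-side InferShape implementations and the REGISTER_OP_CPU_KERNEL / REGISTER_OP_CUDA_KERNEL registrations for pool2d/pool3d are deleted, the shared pooling helpers move from paddle::operators::math to phi::funcs, and shape inference is delegated to phi InferMeta functions. Call sites switch from USE_OP to USE_OP_ITSELF plus a PD_DECLARE_KERNEL of the phi kernel (USE_OP, as far as I can tell, also pulls in the fluid kernel registration, which no longer exists after this change). A condensed before/after sketch, assembled from lines in the hunks below rather than quoted from any single file:

    // before
    USE_OP(pool2d);
    REGISTER_OP_CPU_KERNEL(
        pool2d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
        ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);

    // after
    USE_OP_ITSELF(pool2d);
    PD_DECLARE_KERNEL(pool2d, CPU, ALL_LAYOUT);
    DECLARE_INFER_SHAPE_FUNCTOR(pool2d, Pool2dInferShapeFunctor,
                                PD_INFER_META(phi::PoolInferMeta));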
@@ -297,7 +297,8 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
   VLOG(3) << "BuildInferMetaContext: op kernel signature - " << signature;

   // 2. build infermeta context
-  phi::InferMetaContext infer_meta_context(ctx->IsRuntime());
+  phi::InferMetaContext infer_meta_context(
+      {ctx->IsRuntime(), ctx->IsRunMKLDNNKernel()});

   auto& input_names = std::get<0>(signature.args);
   auto& attr_names = std::get<1>(signature.args);
...
@@ -264,14 +264,23 @@ void BuildDygraphPhiKernelContext(
     size_t start_idx = (i == 0 ? 0 : kernel_ctx->InputRangeAt(i - 1).second);

-    if ((it == ins.end()) &&
-        (input_defs[i].type_index ==
-         std::type_index(typeid(paddle::optional<const phi::DenseTensor&>)))) {
+    if (it == ins.end()) {
+      if (LIKELY(input_defs[i].type_index ==
+                 std::type_index(
+                     typeid(paddle::optional<const phi::DenseTensor&>)))) {
         kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr);
         auto end_idx = start_idx + 1;
         kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
         continue;
+      } else {
+        PADDLE_THROW(phi::errors::NotFound(
+            "Can not find input variable '%s' for %s OP, please check whether "
+            "the name setting in OpArgumentMapping is consistent with that in "
+            "OpMaker.",
+            input_names[i], pt_kernel_signature.name));
+      }
     }

     auto ins_vector = it->second;
     size_t end_idx = start_idx + ins_vector.size();
...
@@ -328,5 +328,5 @@ class Pool2dOpConverter : public OpConverter {
 }  // namespace inference
 }  // namespace paddle

-USE_OP(pool2d);
+USE_OP_ITSELF(pool2d);
 REGISTER_TRT_OP_CONVERTER(pool2d, Pool2dOpConverter);
@@ -224,5 +224,5 @@ class Pool3dOpConverter : public OpConverter {
 }  // namespace inference
 }  // namespace paddle

-USE_OP(pool3d);
+USE_OP_ITSELF(pool3d);
 REGISTER_TRT_OP_CONVERTER(pool3d, Pool3dOpConverter);
@@ -71,4 +71,4 @@ TEST(Pool2dOpConverter, avg_ceil_test) { test_pool2d(false, true, "avg"); }
 }  // namespace inference
 }  // namespace paddle

-USE_OP(pool2d);
+USE_OP_ITSELF(pool2d);
@@ -13,7 +13,7 @@
 // limitations under the License.

 #include "paddle/fluid/inference/tensorrt/plugin/pool3d_op_plugin.h"
-#include "paddle/fluid/operators/math/pooling.h"
+#include "paddle/phi/kernels/funcs/pooling.h"

 namespace paddle {
 namespace inference {
@@ -108,16 +108,14 @@ int Pool3DPlugin::enqueue(int batchSize, const void *const *inputs,
   output_shape.insert(output_shape.begin(), batchSize);

   if (pool3d_type_ == Pool3DType::max) {
-    paddle::operators::math::MaxPool<float> pool_process;
-    paddle::operators::math::Pool3dDirectCUDAFunctor<
-        paddle::operators::math::MaxPool<float>, float>
+    phi::funcs::MaxPool<float> pool_process;
+    phi::funcs::Pool3dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
         pool3d_forward;
     pool3d_forward(idata, input_shape, output_shape, ksize_, strides_,
                    paddings_, true, adaptive_, odatas[0], stream, pool_process);
   } else if (pool3d_type_ == Pool3DType::avg) {
-    paddle::operators::math::AvgPool<float> pool_process;
-    paddle::operators::math::Pool3dDirectCUDAFunctor<
-        paddle::operators::math::AvgPool<float>, float>
+    phi::funcs::AvgPool<float> pool_process;
+    phi::funcs::Pool3dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
         pool3d_forward;
     pool3d_forward(idata, input_shape, output_shape, ksize_, strides_,
                    paddings_, true, adaptive_, odatas[0], stream, pool_process);
@@ -351,16 +349,14 @@ int Pool3DPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
   }

   if (pool3d_type_ == "max") {
-    paddle::operators::math::MaxPool<float> pool_process;
-    paddle::operators::math::Pool3dDirectCUDAFunctor<
-        paddle::operators::math::MaxPool<float>, float>
+    phi::funcs::MaxPool<float> pool_process;
+    phi::funcs::Pool3dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
         pool3d_forward;
     pool3d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
                    true, adaptive_, output, stream, pool_process);
   } else if (pool3d_type_ == "avg") {
-    paddle::operators::math::AvgPool<float> pool_process;
-    paddle::operators::math::Pool3dDirectCUDAFunctor<
-        paddle::operators::math::AvgPool<float>, float>
+    phi::funcs::AvgPool<float> pool_process;
+    phi::funcs::Pool3dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
         pool3d_forward;
     pool3d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
                    true, adaptive_, output, stream, pool_process);
...
@@ -13,7 +13,7 @@
 // limitations under the License.

 #include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"
-#include "paddle/fluid/operators/math/pooling.h"
+#include "paddle/phi/kernels/funcs/pooling.h"

 namespace paddle {
 namespace inference {
@@ -84,16 +84,14 @@ int PoolPlugin::enqueue(int batchSize, const void *const *inputs,
   output_shape.insert(output_shape.begin(), batchSize);

   if (pool_type_ == PoolType::max) {
-    paddle::operators::math::MaxPool<float> pool_process;
-    paddle::operators::math::Pool2dDirectCUDAFunctor<
-        paddle::operators::math::MaxPool<float>, float>
+    phi::funcs::MaxPool<float> pool_process;
+    phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
         pool2d_forward;
     pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
                    paddings_, true, false, odatas[0], stream, pool_process);
   } else if (pool_type_ == PoolType::avg) {
-    paddle::operators::math::AvgPool<float> pool_process;
-    paddle::operators::math::Pool2dDirectCUDAFunctor<
-        paddle::operators::math::AvgPool<float>, float>
+    phi::funcs::AvgPool<float> pool_process;
+    phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
         pool2d_forward;
     pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
                    paddings_, exclusive_, adaptive_, odatas[0], stream,
@@ -292,16 +290,14 @@ int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
   }

   if (pool_type_ == "max") {
-    paddle::operators::math::MaxPool<float> pool_process;
-    paddle::operators::math::Pool2dDirectCUDAFunctor<
-        paddle::operators::math::MaxPool<float>, float>
+    phi::funcs::MaxPool<float> pool_process;
+    phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::MaxPool<float>, float>
         pool2d_forward;
     pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
                    true, false, output, stream, pool_process);
   } else if (pool_type_ == "avg") {
-    paddle::operators::math::AvgPool<float> pool_process;
-    paddle::operators::math::Pool2dDirectCUDAFunctor<
-        paddle::operators::math::AvgPool<float>, float>
+    phi::funcs::AvgPool<float> pool_process;
+    phi::funcs::Pool2dDirectCUDAFunctor<phi::funcs::AvgPool<float>, float>
         pool2d_forward;
     pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
                    exclusive_, adaptive_, output, stream, pool_process);
...
@@ -16,7 +16,6 @@ limitations under the License. */
 #include <vector>

 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/phi_utils.h"
-#include "paddle/fluid/operators/math/pooling.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/phi/kernels/empty_kernel.h"
 #include "paddle/phi/kernels/flatten_grad_kernel.h"
...
@@ -20,7 +20,6 @@ math_library(sampler DEPS generator)
 # math_library(math_function DEPS blas dense_tensor tensor)

 math_library(maxouting)
-math_library(pooling)

 if(WITH_MKLDNN)
     math_library(selected_rows_functor DEPS selected_rows_utils math_function blas mkldnn_axpy_handler)
...
@@ -12,14 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/operators/pool_op.h"
+#include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"
+#include "paddle/phi/kernels/funcs/pooling.h"

 namespace paddle {
 namespace operators {

 using framework::DataLayout;
+using framework::Tensor;
 using dnnl::memory;
 using dnnl::pooling_backward;
 using dnnl::pooling_forward;
@@ -83,10 +85,10 @@ class PoolingMKLDNNHandler
         phi::slice_ddim(input_dims, 2, input_dims.size());

     if (global_pooling) {
-      operators::UpdateKsize(&ksize, data_dims);
+      phi::funcs::UpdateKernelSize(&ksize, data_dims);
     }

-    operators::UpdatePadding(&paddings, global_pooling, 0, padding_algorithm,
+    phi::funcs::UpdatePadding(&paddings, global_pooling, 0, padding_algorithm,
                              data_dims, strides, ksize);

     const auto src_tz = phi::vectorize(input->dims());
@@ -173,10 +175,10 @@ class PoolingMKLDNNHandler
     framework::DDim data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());

     if (global_pooling) {
-      operators::UpdateKsize(&ksize, data_dims);
+      phi::funcs::UpdateKernelSize(&ksize, data_dims);
     }

-    operators::UpdatePadding(&paddings, global_pooling, 0, padding_algorithm,
+    phi::funcs::UpdatePadding(&paddings, global_pooling, 0, padding_algorithm,
                              data_dims, strides, ksize);

     auto src_tz = phi::vectorize<int64_t>(in_x->dims());
...
@@ -26,13 +26,14 @@
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/core/kernel_registry.h"

-USE_OP(pool2d);
+USE_OP_ITSELF(pool2d);
 USE_OP_DEVICE_KERNEL(pool2d, MKLDNN);
 USE_OP_ITSELF(relu);
 USE_OP_DEVICE_KERNEL(relu, MKLDNN);
 USE_OP_ITSELF(transpose);
 USE_OP_DEVICE_KERNEL(transpose, MKLDNN);
+PD_DECLARE_KERNEL(pool2d, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(relu, CPU, ALL_LAYOUT);

 namespace paddle {
...
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/operator.h"
#endif
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedPoolingDescriptor = platform::ScopedPoolingDescriptor;
using DataLayout = platform::DataLayout;
using PoolingMode = platform::PoolingMode;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
DataLayout getLayoutFromStr(std::string data_format) {
if (data_format == "NHWC") {
return DataLayout::kNHWC;
} else if (data_format == "NCHW") {
return DataLayout::kNCHW;
} else if (data_format == "NCDHW") {
return DataLayout::kNCDHW;
} else {
return DataLayout::kNCDHW;
}
}
template <typename T>
class PoolCUDNNOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("Pool operator CUDA kernel must use "
"CUDAPlace rather than CPUPlace."));
const Tensor *input = ctx.Input<Tensor>("X");
Tensor *output = ctx.Output<Tensor>("Out");
output->mutable_data<T>(ctx.GetPlace());
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
bool exclusive = ctx.Attr<bool>("exclusive");
bool adaptive = ctx.Attr<bool>("adaptive");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string data_format = ctx.Attr<std::string>("data_format");
bool global_pooling = ctx.Attr<bool>("global_pooling");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// update paddings
auto in_x_dims = input->dims();
framework::DDim data_dims;
if (channel_last) {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} else {
data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
const std::string str_NCHW = "NCHW", str_NHWC = "NHWC";
const std::string str_NCDHW = "NCDHW", str_NDHWC = "NDHWC";
// -----------------transformed tensor ------------------------
Tensor transformed_input(input->type());
Tensor transformed_output(output->type());
DataLayout layout;
if (data_format == str_NDHWC) {
layout = DataLayout::kNCDHW;
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 4, 1, 2, 3};
// input
transformed_input.Resize(input->dims());
auto in_dims_vec = phi::vectorize(input->dims());
in_dims_vec[1] = input->dims()[4];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
in_dims_vec[4] = input->dims()[3];
transformed_input.Resize(phi::make_ddim(in_dims_vec));
transformed_input.mutable_data(ctx.GetPlace(), input->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 5> trans5;
trans5(dev_ctx, *input, &transformed_input, axis);
// output
transformed_output.Resize(output->dims());
auto out_dims_vec = phi::vectorize(output->dims());
out_dims_vec[1] = output->dims()[4];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
out_dims_vec[4] = output->dims()[3];
transformed_output.Resize(phi::make_ddim(out_dims_vec));
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
} else if (data_format == str_NHWC) {
layout = DataLayout::kNCHW;
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 3, 1, 2};
transformed_input.Resize(input->dims());
auto in_dims_vec = phi::vectorize(input->dims());
in_dims_vec[1] = input->dims()[3];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
transformed_input.Resize(phi::make_ddim(in_dims_vec));
transformed_input.mutable_data(ctx.GetPlace(), input->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 4> trans;
trans(dev_ctx, *input, &transformed_input, axis);
transformed_output.Resize(output->dims());
auto out_dims_vec = phi::vectorize(output->dims());
out_dims_vec[1] = output->dims()[3];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
transformed_output.Resize(phi::make_ddim(out_dims_vec));
#endif
} else {
layout = getLayoutFromStr(data_format);
transformed_input = *input;
transformed_output = *output;
}
const T *tranformed_input_data = transformed_input.data<T>();
T *tranformed_output_data = transformed_output.mutable_data<T>(
transformed_output.dims(), ctx.GetPlace());
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedPoolingDescriptor pool_desc;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
miopenTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
#else
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
#endif
PoolingMode pooling_mode;
if (pooling_type == "max") {
pooling_mode = PoolingMode::kMaximum;
} else {
pooling_mode = exclusive ? PoolingMode::kAverageExclusive
: PoolingMode::kAverageInclusive;
}
#ifdef PADDLE_WITH_HIP
miopenPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, ksize, paddings, strides);
#else
cudnnPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, ksize, paddings, strides);
#endif
// ------------------- cudnn pool algorithm ---------------------
auto handle = ctx.cuda_device_context().cudnn_handle();
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
#ifdef PADDLE_WITH_HIP
char *pool_workspace;
size_t pool_worksize = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenPoolingGetWorkSpaceSizeV2(
cudnn_pool_desc, cudnn_output_desc, &pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&pool_workspace, pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenPoolingForward(
handle, cudnn_pool_desc, &alpha, cudnn_input_desc,
tranformed_input_data, &beta, cudnn_output_desc, tranformed_output_data,
false, pool_workspace, pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(hipFree(pool_workspace));
#else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnPoolingForward(
handle, cudnn_pool_desc, &alpha, cudnn_input_desc,
tranformed_input_data, &beta, cudnn_output_desc,
tranformed_output_data));
#endif
// add
if (data_format == str_NDHWC) {
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 2, 3, 4, 1};
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 5>
trans5_v2;
trans5_v2(dev_ctx, transformed_output, output, axis);
}
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
if (data_format == str_NHWC) {
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 2, 3, 1};
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 4> trans;
trans(dev_ctx, transformed_output, output, axis);
}
#endif
}
};
template <typename T>
class PoolCUDNNGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("Pool operator CUDA kernel must use "
"CUDAPlace rather than CPUPlace."));
const Tensor *input = ctx.Input<Tensor>("X");
const Tensor *output = ctx.Input<Tensor>("Out");
const Tensor *output_grad =
ctx.Input<Tensor>(framework::GradVarName("Out"));
Tensor *input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
bool exclusive = ctx.Attr<bool>("exclusive");
bool adaptive = ctx.Attr<bool>("adaptive");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string data_format = ctx.Attr<std::string>("data_format");
bool global_pooling = ctx.Attr<bool>("global_pooling");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
#ifdef PADDLE_WITH_HIP
if (pooling_type == "max") {
using OpKernelMap = paddle::framework::OperatorWithKernel::OpKernelMap;
using OpKernelFunc = paddle::framework::OperatorWithKernel::OpKernelFunc;
auto &all_op_kernels =
paddle::framework::OperatorWithKernel::AllOpKernels();
std::string op_type = "pool2d_grad";
auto kernels_iter = all_op_kernels.find(op_type);
PADDLE_ENFORCE_NE(
kernels_iter, all_op_kernels.end(),
platform::errors::Unavailable(
"There are no kernels which are registered in the %s operator.",
op_type));
OpKernelMap &kernels = kernels_iter->second;
paddle::framework::OpKernelType expected_kernel_key(
paddle::framework::ToDataType(typeid(T)), ctx.GetPlace());
auto kernel_iter = kernels.find(expected_kernel_key);
PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
platform::errors::NotFound(
"Operator (%s) does not have kernel for %s.",
op_type, KernelTypeToString(expected_kernel_key)));
std::unique_ptr<OpKernelFunc> kernel_func_(
new OpKernelFunc(kernel_iter->second));
(*kernel_func_)(ctx);
return;
}
#endif
// update paddings
auto in_x_dims = input->dims();
framework::DDim data_dims;
if (channel_last) {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} else {
data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
// ------- tensor grad --------------
Tensor transformed_input(input->type());
Tensor transformed_output(output->type());
Tensor transformed_output_grad(output_grad->type());
input_grad->mutable_data<T>(ctx.GetPlace());
Tensor transformed_input_grad(input_grad->type());
DataLayout layout;
const std::string str_NCHW = "NCHW", str_NHWC = "NHWC";
const std::string str_NCDHW = "NCDHW", str_NDHWC = "NDHWC";
if (data_format == str_NDHWC) {
layout = DataLayout::kNCDHW;
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 4, 1, 2, 3};
// input
transformed_input.Resize(input->dims());
auto in_dims_vec = phi::vectorize(input->dims());
in_dims_vec[1] = input->dims()[4];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
in_dims_vec[4] = input->dims()[3];
transformed_input.Resize(phi::make_ddim(in_dims_vec));
transformed_input.mutable_data(ctx.GetPlace(), input->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 5> trans5;
trans5(dev_ctx, *input, &transformed_input, axis);
// output
transformed_output.Resize(output->dims());
auto out_dims_vec = phi::vectorize(output->dims());
out_dims_vec[1] = output->dims()[4];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
out_dims_vec[4] = output->dims()[3];
transformed_output.Resize(phi::make_ddim(out_dims_vec));
transformed_output.mutable_data(ctx.GetPlace(), output->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 5>
trans5_v2;
trans5_v2(dev_ctx, *output, &transformed_output, axis);
// output grad
transformed_output_grad.Resize(phi::make_ddim(out_dims_vec));
transformed_output_grad.mutable_data(ctx.GetPlace(), output_grad->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 5>
trans5_v3;
trans5_v3(dev_ctx, *output_grad, &transformed_output_grad, axis);
// input grad
transformed_input_grad.Resize(phi::make_ddim(in_dims_vec));
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
} else if (data_format == str_NHWC) {
layout = DataLayout::kNCHW;
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 3, 1, 2};
// input
transformed_input.Resize(input->dims());
auto in_dims_vec = phi::vectorize(input->dims());
in_dims_vec[1] = input->dims()[3];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
transformed_input.Resize(phi::make_ddim(in_dims_vec));
transformed_input.mutable_data(ctx.GetPlace(), input->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 4> trans4;
trans4(dev_ctx, *input, &transformed_input, axis);
// output
transformed_output.Resize(output->dims());
auto out_dims_vec = phi::vectorize(output->dims());
out_dims_vec[1] = output->dims()[3];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
transformed_output.Resize(phi::make_ddim(out_dims_vec));
transformed_output.mutable_data(ctx.GetPlace(), output->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 4>
trans4_v2;
trans4_v2(dev_ctx, *output, &transformed_output, axis);
// output grad
transformed_output_grad.Resize(phi::make_ddim(out_dims_vec));
transformed_output_grad.mutable_data(ctx.GetPlace(), output_grad->type());
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 4>
trans4_v3;
trans4_v3(dev_ctx, *output_grad, &transformed_output_grad, axis);
// input grad
transformed_input_grad.Resize(phi::make_ddim(in_dims_vec));
#endif
} else {
layout = getLayoutFromStr(data_format);
transformed_input = *input;
transformed_output = *output;
transformed_output_grad = *output_grad;
transformed_input_grad = *input_grad;
}
const T *input_data = transformed_input.data<T>();
const T *output_data = transformed_output.data<T>();
const T *output_grad_data = transformed_output_grad.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedPoolingDescriptor pool_desc;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
miopenTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
#else
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
#endif
PoolingMode pooling_mode;
if (pooling_type == "max") {
if (FLAGS_cudnn_deterministic) {
pooling_mode = PoolingMode::kMaximumDeterministic;
} else {
pooling_mode = PoolingMode::kMaximum;
}
} else {
pooling_mode = exclusive ? PoolingMode::kAverageExclusive
: PoolingMode::kAverageInclusive;
}
#ifdef PADDLE_WITH_HIP
miopenPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, ksize, paddings, strides);
#else
cudnnPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, ksize, paddings, strides);
#endif
// ------------------- cudnn pool algorithm ---------------------
auto handle = ctx.cuda_device_context().cudnn_handle();
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
if (input_grad) {
T *input_grad_data = transformed_input_grad.mutable_data<T>(
transformed_input_grad.dims(), ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset input_grad.
#ifdef PADDLE_WITH_HIP
char *pool_workspace;
size_t pool_worksize = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenPoolingGetWorkSpaceSizeV2(
cudnn_pool_desc, cudnn_output_desc, &pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&pool_workspace, pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenPoolingBackward(
handle, cudnn_pool_desc, &alpha, cudnn_output_desc, output_data,
cudnn_output_desc, output_grad_data, cudnn_input_desc, input_data,
&beta, cudnn_input_desc, input_grad_data, pool_workspace));
PADDLE_ENFORCE_GPU_SUCCESS(hipFree(pool_workspace));
#else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnPoolingBackward(
handle, cudnn_pool_desc, &alpha, cudnn_output_desc, output_data,
cudnn_output_desc, output_grad_data, cudnn_input_desc, input_data,
&beta, cudnn_input_desc, input_grad_data));
#endif
if (data_format == str_NDHWC) {
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 2, 3, 4, 1};
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 5>
trans5_v4;
trans5_v4(dev_ctx, transformed_input_grad, input_grad, axis);
}
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
if (data_format == str_NHWC) {
auto &dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
std::vector<int> axis{0, 2, 3, 1};
phi::funcs::Transpose<paddle::platform::CUDADeviceContext, T, 4>
trans4_v4;
trans4_v4(dev_ctx, transformed_input_grad, input_grad, axis);
}
#endif
}
}
};
template <typename T>
class PoolCUDNNGradGradOpKernel : public PoolCUDNNOpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
if (pooling_type == "max") {
PADDLE_THROW(platform::errors::InvalidArgument(
"Pool op grad grad only supports avgpool."));
} else {
PoolCUDNNOpKernel<T>::Compute(ctx);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
REGISTER_OP_KERNEL(pool2d, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNOpKernel<float>,
ops::PoolCUDNNOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool2d_grad, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNGradOpKernel<float>,
ops::PoolCUDNNGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool3d, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNOpKernel<float>,
ops::PoolCUDNNOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool3d_grad, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNGradOpKernel<float>);
#else
REGISTER_OP_KERNEL(pool2d, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNOpKernel<float>,
ops::PoolCUDNNOpKernel<double>,
ops::PoolCUDNNOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool2d_grad, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNGradOpKernel<float>,
ops::PoolCUDNNGradOpKernel<double>,
ops::PoolCUDNNGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool2d_grad_grad, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNGradGradOpKernel<float>,
ops::PoolCUDNNGradGradOpKernel<double>,
ops::PoolCUDNNGradGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool3d, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNOpKernel<float>,
ops::PoolCUDNNOpKernel<double>,
ops::PoolCUDNNOpKernel<plat::float16>);
REGISTER_OP_KERNEL(pool3d_grad, CUDNN, plat::CUDAPlace,
ops::PoolCUDNNGradOpKernel<float>,
ops::PoolCUDNNGradOpKernel<double>);
#endif
@@ -15,6 +15,12 @@ limitations under the License. */
 #include "paddle/fluid/operators/pool_op.h"

 #include <unordered_map>

+#include "paddle/fluid/framework/infershape_utils.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/backward.h"
+#include "paddle/phi/infermeta/unary.h"
 #include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
 #ifdef PADDLE_WITH_MKLDNN
 #include "paddle/fluid/platform/mkldnn_helper.h"
@@ -23,125 +29,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
int PoolOutputSize(int input_size, int filter_size, int padding_1,
int padding_2, int stride, bool ceil_mode) {
int output_size;
if (!ceil_mode) {
output_size =
(input_size - filter_size + padding_1 + padding_2) / stride + 1;
} else {
output_size =
(input_size - filter_size + padding_1 + padding_2 + stride - 1) /
stride +
1;
}
PADDLE_ENFORCE_GT(
output_size, 0,
platform::errors::InvalidArgument(
"the output size must be greater than 0. But received: "
"output_size = %d due to the settings of input_size(%d), "
"padding(%d,%d), "
"k_size(%d) and stride(%d). Please check again!",
output_size, input_size, padding_1, padding_2, filter_size, stride));
return output_size;
}
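// Editorial note (not in the original source): a quick numeric check of the
// two branches above, with hypothetical values input_size = 32,
// filter_size = 3, padding_1 = padding_2 = 1, stride = 2:
//   floor mode: (32 - 3 + 1 + 1) / 2 + 1 = 31 / 2 + 1 = 15 + 1 = 16
//   ceil mode:  (32 - 3 + 1 + 1 + 2 - 1) / 2 + 1 = 32 / 2 + 1 = 17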
void PoolOp::InferShape(framework::InferShapeContext* ctx) const {
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"), true,
platform::errors::NotFound("Input(X) of Pool operator is not found."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Out"), true,
platform::errors::NotFound("Output(Out) of Pool operator is not found."));
std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
bool ceil_mode = ctx->Attrs().Get<bool>("ceil_mode");
bool adaptive = ctx->Attrs().Get<bool>("adaptive");
bool global_pooling = ctx->Attrs().Get<bool>("global_pooling");
std::string data_format = ctx->Attrs().Get<std::string>("data_format");
std::string padding_algorithm =
ctx->Attrs().Get<std::string>("padding_algorithm");
auto in_x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_EQ(
in_x_dims.size() == 4 || in_x_dims.size() == 5, true,
platform::errors::InvalidArgument(
"the input of Op(pool) should be 4-D or 5-D Tensor. But "
"received: %u-D Tensor and it's shape is [%s].",
in_x_dims.size(), in_x_dims));
PADDLE_ENFORCE_EQ(
in_x_dims.size() - ksize.size(), 2U,
platform::errors::InvalidArgument(
"the dimension of input minus the size of "
"Attr(ksize) must be euqal to 2 in Op(pool). "
"But received: the dimension of input minus the size "
"of Attr(ksize) is %d, the "
"input's dimension is %d, the shape of input "
"is [%s], the Attr(ksize)'s size is %d, the Attr(ksize) is [%s].",
in_x_dims.size() - ksize.size(), in_x_dims.size(), in_x_dims,
ksize.size(), phi::make_ddim(ksize)));
PADDLE_ENFORCE_EQ(
ksize.size(), strides.size(),
platform::errors::InvalidArgument(
"the size of Attr(ksize) and Attr(strides) in "
"Op(pool) must be equal. "
"But received: Attr(ksize)'s size is %d, Attr(strides)'s "
"size is %d, Attr(ksize) is [%s], Attr(strides)is [%s].",
ksize.size(), strides.size(), phi::make_ddim(ksize),
phi::make_ddim(strides)));
// MKL-DNN Kernels are using NCHW order of dims description
// so we ignore data_format consideration for MKL-DNN kernel
const bool channel_last = (ctx->IsRunMKLDNNKernel() == false) &&
(data_format == "NHWC" || data_format == "NDHWC");
// update paddings if "SAME" or global_pooling
framework::DDim data_dims;
if (channel_last) {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} else {
data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
std::vector<int64_t> output_shape;
if (adaptive) {
output_shape.insert(output_shape.end(), ksize.begin(), ksize.end());
} else {
for (int i = 0; i < data_dims.size(); ++i) {
if ((!ctx->IsRuntime()) && (data_dims[i] < 0)) {
output_shape.push_back(data_dims[i]);
} else {
output_shape.push_back(
PoolOutputSize(data_dims[i], ksize[i], paddings[2 * i],
paddings[2 * i + 1], strides[i], ceil_mode));
}
}
}
// output_N = input_N
output_shape.insert(output_shape.begin(), in_x_dims[0]);
// output_C = input_C
if (channel_last) {
output_shape.push_back(in_x_dims[in_x_dims.size() - 1]);
} else {
output_shape.insert(output_shape.begin() + 1, in_x_dims[1]);
}
ctx->SetOutputDim("Out", phi::make_ddim(output_shape));
ctx->ShareLoD("X", "Out");
}
 bool CanMKLDNNSupportPool(const framework::ExecutionContext& ctx) {
   if (ctx.Attr<bool>("adaptive") == false) return true;
   // (jczaja): oneDNN is supporting only unchangable in size pool window
@@ -216,16 +103,6 @@ framework::OpKernelType PoolOp::GetKernelTypeForVar(
                                  tensor.place(), tensor.layout());
 }
void PoolOpGrad::InferShape(framework::InferShapeContext* ctx) const {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::NotFound(
"Input(X) of Pool Gradoperator is not found."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::NotFound(
"Input(X@GRAD) of Pool Gradoperator is not found."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
 framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const {
   framework::LibraryType library_{framework::LibraryType::kPlain};
@@ -471,7 +348,7 @@ class Pool2dOpGradGradMaker : public framework::SingleGradOpMaker<T> {
  protected:
   void Apply(GradOpPtr<T> grad_op) const override {
-    grad_op->SetType("pool2d_grad_grad");
+    grad_op->SetType("pool2d_double_grad");
     grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
     grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
     grad_op->SetAttrMap(this->Attrs());
@@ -692,35 +569,34 @@ Example:
 namespace ops = paddle::operators;

+DECLARE_INFER_SHAPE_FUNCTOR(pool2d, Pool2dInferShapeFunctor,
+                            PD_INFER_META(phi::PoolInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(pool2d_grad, Pool2dGradInferShapeFunctor,
+                            PD_INFER_META(phi::PoolGradInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(pool2d_double_grad,
+                            Pool2dDoubleGradInferShapeFunctor,
+                            PD_INFER_META(phi::PoolInferMeta));
+
 REGISTER_OPERATOR(
     pool2d, ops::PoolOp, ops::Pool2dOpMaker, ops::PoolOpInferVarType,
     paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
-    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
+    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
+    Pool2dInferShapeFunctor);
 REGISTER_OPERATOR(pool2d_grad, ops::PoolOpGrad,
                   ops::Pool2dOpGradGradMaker<paddle::framework::OpDesc>,
-                  ops::Pool2dOpGradGradMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(pool2d_grad_grad, ops::PoolOp);
-
-REGISTER_OP_CPU_KERNEL(
-    pool2d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    pool2d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    pool2d_grad_grad,
-    ops::PoolGradGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::PoolGradGradKernel<paddle::platform::CPUDeviceContext, double>);
+                  ops::Pool2dOpGradGradMaker<paddle::imperative::OpBase>,
+                  Pool2dGradInferShapeFunctor);
+REGISTER_OPERATOR(pool2d_double_grad, ops::PoolOp,
+                  Pool2dDoubleGradInferShapeFunctor);
+
+DECLARE_INFER_SHAPE_FUNCTOR(pool3d, Pool3dInferShapeFunctor,
+                            PD_INFER_META(phi::PoolInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(pool3d_grad, Pool3dGradInferShapeFunctor,
+                            PD_INFER_META(phi::PoolGradInferMeta));

 REGISTER_OPERATOR(
     pool3d, ops::PoolOp, ops::Pool3dOpMaker, ops::PoolOpInferVarType,
     paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
-    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>);
-REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad);
-
-REGISTER_OP_CPU_KERNEL(
-    pool3d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    pool3d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);
+    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
+    Pool3dInferShapeFunctor);
+REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad, Pool3dGradInferShapeFunctor);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
pool2d, ops::PoolKernel<paddle::platform::CUDADeviceContext, float>,
ops::PoolKernel<paddle::platform::CUDADeviceContext, double>,
ops::PoolKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
pool2d_grad,
ops::PoolGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::PoolGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::PoolGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
pool2d_grad_grad,
ops::PoolGradGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::PoolGradGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::PoolGradGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
pool3d, ops::PoolKernel<paddle::platform::CUDADeviceContext, float>,
ops::PoolKernel<paddle::platform::CUDADeviceContext, double>,
ops::PoolKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
pool3d_grad,
ops::PoolGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::PoolGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::PoolGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
@@ -12,19 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+// NOTE(Ruibiao): Difficult to remove code from this header file because too
+// many files rely on it through "mkldnn_reuse.h"
 #pragma once

-#include <algorithm>
-#include <string>
-#include <vector>
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/math/pooling.h"
-#include "paddle/phi/kernels/funcs/math_function.h"
-#if defined(__HIPCC__) || defined(__NVCC__)
-#include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h"
-#endif

 namespace paddle {
 namespace operators {
@@ -35,8 +28,6 @@ class PoolOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext* ctx) const override;
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override;
@@ -50,8 +41,6 @@ class PoolOpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext* ctx) const override;
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override;
@@ -71,292 +60,5 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override;
 };
template <typename T = int>
inline void UpdatePadding(std::vector<T>* paddings, const bool global_pooling,
const bool adaptive,
const std::string padding_algorithm,
const framework::DDim data_dims,
const std::vector<T>& strides,
const std::vector<T>& ksize) {
// set padding size == data_dims.size() * 2
auto data_shape = phi::vectorize<T>(data_dims);
if (static_cast<int>(paddings->size()) == data_dims.size()) {
for (int i = 0; i < data_dims.size(); ++i) {
T copy_pad = *(paddings->begin() + 2 * i);
paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
}
} else {
PADDLE_ENFORCE_EQ(data_dims.size() * 2, paddings->size(),
platform::errors::InvalidArgument(
"Paddings size %d should be the same or twice as the "
"pooling size %d.",
paddings->size(), data_dims.size() * 2));
}
// when padding_algorithm is "VALID" or "SAME"
if (padding_algorithm == "SAME") {
for (int i = 0; i < data_dims.size(); ++i) {
T out_size = (data_dims[i] + strides[i] - 1) / strides[i];
T pad_sum =
std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i],
static_cast<T>(0));
T pad_0 = pad_sum / 2;
T pad_1 = pad_sum - pad_0;
*(paddings->begin() + i * 2) = pad_0;
*(paddings->begin() + i * 2 + 1) = pad_1;
}
} else if (padding_algorithm == "VALID") {
for (auto it = paddings->begin(); it != paddings->end(); it++) {
*it = 0;
}
}
// if global_pooling == true or adaptive == true, padding will be ignore
if (global_pooling || adaptive) {
for (auto it = paddings->begin(); it != paddings->end(); it++) {
*it = 0;
}
}
}
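// Editorial note (not in the original source): for the "SAME" branch above,
// with hypothetical values data_dims[i] = 32, strides[i] = 2, ksize[i] = 3:
//   out_size = (32 + 2 - 1) / 2 = 16
//   pad_sum  = max((16 - 1) * 2 + 3 - 32, 0) = 1
//   pad_0 = 0, pad_1 = 1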
template <typename T = int>
inline void UpdateKsize(std::vector<T>* ksize,
const framework::DDim data_dims) {
ksize->resize(static_cast<size_t>(data_dims.size()));
for (size_t i = 0; i < ksize->size(); ++i) {
*(ksize->begin() + i) = static_cast<T>(data_dims[i]);
}
}
inline int getReduceNum(const framework::Tensor& input,
const framework::Tensor* output,
const std::string data_format,
std::vector<int>* reduce_dim) {
// data_format only can be NCHW
bool channel_last = (data_format == "NHWC");
if (channel_last) {
return 0;
}
int reduce_num = 0;
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
if ((output_height == 1) && (output_width == 1)) {
reduce_dim->push_back(2);
reduce_dim->push_back(3);
reduce_num = input.dims()[2] * input.dims()[3];
}
return reduce_num;
}
template <typename DeviceContext, typename T>
class PoolKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* in_x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
std::string pooling_type = context.Attr<std::string>("pooling_type");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
std::string data_format = context.Attr<std::string>("data_format");
bool exclusive = context.Attr<bool>("exclusive");
bool adaptive = context.Attr<bool>("adaptive");
bool global_pooling = context.Attr<bool>("global_pooling");
std::string padding_algorithm =
context.Attr<std::string>("padding_algorithm");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// update paddings
auto in_x_dims = in_x->dims();
framework::DDim data_dims;
if (channel_last) {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} else {
data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
auto& dev_ctx = context.template device_context<DeviceContext>();
switch (ksize.size()) {
case 2: {
if (pooling_type == "max") {
paddle::operators::math::Pool2dFunctor<
DeviceContext, paddle::operators::math::MaxPool<T>, T>
pool2d_forward;
paddle::operators::math::MaxPool<T> pool_process;
pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, data_format,
true, false, out, pool_process);
} else if (pooling_type == "avg") {
std::vector<int> reduce_dim;
int reduce_num = getReduceNum(*in_x, out, data_format, &reduce_dim);
if (reduce_num > 0 &&
adaptive) { // for adaptive_avg_pool2d && output_size == 1
#if defined(__HIPCC__) || defined(__NVCC__)
auto stream = dev_ctx.stream();
TensorReduceImpl<T, T, kps::AddFunctor, kps::DivideFunctor<T>>(
dev_ctx, *in_x, out, kps::DivideFunctor<T>(reduce_num),
reduce_dim, stream);
#else // for cpu
paddle::operators::math::Pool2dFunctor<
DeviceContext, paddle::operators::math::AvgPool<T>, T>
pool2d_forward;
paddle::operators::math::AvgPool<T> pool_process;
pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings,
data_format, exclusive, adaptive, out, pool_process);
#endif
} else { // avgpool_2d or adaptive_avg_pool2d && output_size != 1
paddle::operators::math::Pool2dFunctor<
DeviceContext, paddle::operators::math::AvgPool<T>, T>
pool2d_forward;
paddle::operators::math::AvgPool<T> pool_process;
pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings,
data_format, exclusive, adaptive, out, pool_process);
}
}
} break;
case 3: {
if (pooling_type == "max") {
paddle::operators::math::Pool3dFunctor<
DeviceContext, paddle::operators::math::MaxPool<T>, T>
pool3d_forward;
paddle::operators::math::MaxPool<T> pool_process;
pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, data_format,
true, false, out, pool_process);
} else if (pooling_type == "avg") {
paddle::operators::math::Pool3dFunctor<
DeviceContext, paddle::operators::math::AvgPool<T>, T>
pool3d_forward;
paddle::operators::math::AvgPool<T> pool_process;
pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, data_format,
exclusive, adaptive, out, pool_process);
}
} break;
default: {
PADDLE_THROW(platform::errors::InvalidArgument(
"Pool op only supports 2D and 3D input."));
}
}
}
};
template <typename DeviceContext, typename T>
class PoolGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* in_x = context.Input<Tensor>("X");
const Tensor* out = context.Input<Tensor>("Out");
const Tensor* out_grad =
context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
std::string pooling_type = context.Attr<std::string>("pooling_type");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
bool exclusive = context.Attr<bool>("exclusive");
bool adaptive = context.Attr<bool>("adaptive");
std::string data_format = context.Attr<std::string>("data_format");
bool global_pooling = context.Attr<bool>("global_pooling");
std::string padding_algorithm =
context.Attr<std::string>("padding_algorithm");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// update paddings
auto in_x_dims = in_x->dims();
framework::DDim data_dims;
if (channel_last) {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} else {
data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
auto& dev_ctx = context.template device_context<DeviceContext>();
if (in_x_grad) {
in_x_grad->mutable_data<T>(context.GetPlace());
phi::funcs::SetConstant<DeviceContext, T> set_constant;
set_constant(dev_ctx, in_x_grad, static_cast<T>(0.0));
switch (ksize.size()) {
case 2: {
if (pooling_type == "max") {
paddle::operators::math::MaxPool2dGradFunctor<DeviceContext, T>
pool2d_backward;
pool2d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides,
paddings, data_format, in_x_grad);
} else if (pooling_type == "avg") {
paddle::operators::math::Pool2dGradFunctor<
DeviceContext, paddle::operators::math::AvgPoolGrad<T>, T>
pool2d_backward;
paddle::operators::math::AvgPoolGrad<T> pool_process;
pool2d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides,
paddings, data_format, exclusive, adaptive,
in_x_grad, pool_process);
}
} break;
case 3: {
if (pooling_type == "max") {
paddle::operators::math::MaxPool3dGradFunctor<DeviceContext, T>
pool3d_backward;
pool3d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides,
paddings, data_format, in_x_grad);
} else if (pooling_type == "avg") {
paddle::operators::math::Pool3dGradFunctor<
DeviceContext, paddle::operators::math::AvgPoolGrad<T>, T>
pool3d_backward;
paddle::operators::math::AvgPoolGrad<T> pool_process;
pool3d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides,
paddings, data_format, exclusive, adaptive,
in_x_grad, pool_process);
}
} break;
default: {
PADDLE_THROW(platform::errors::InvalidArgument(
"Pool op only supports 2D and 3D input."));
}
}
}
}
};
template <typename DeviceContext, typename T>
class PoolGradGradKernel : public PoolKernel<DeviceContext, T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
std::string pooling_type = context.Attr<std::string>("pooling_type");
if (pooling_type == "max") {
PADDLE_THROW(platform::errors::InvalidArgument(
"Pool op grad grad only supports avgpool."));
} else {
PoolKernel<DeviceContext, T>::Compute(context);
}
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -12,8 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,8 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/pool_op.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h" #include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -80,10 +81,10 @@ class MLUPoolOpKernel : public framework::OpKernel<T> { ...@@ -80,10 +81,10 @@ class MLUPoolOpKernel : public framework::OpKernel<T> {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1); data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} }
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm, phi::funcs::UpdatePadding(&paddings, global_pooling, adaptive,
data_dims, strides, ksize); padding_algorithm, data_dims, strides, ksize);
if (global_pooling) { if (global_pooling) {
UpdateKsize(&ksize, data_dims); phi::funcs::UpdateKernelSize(&ksize, data_dims);
} }
MLUCnnlTensorDesc in_x_desc(*in_x, cnnl_layout, ToCnnlDataType<T>()); MLUCnnlTensorDesc in_x_desc(*in_x, cnnl_layout, ToCnnlDataType<T>());
...@@ -191,10 +192,10 @@ class MLUPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -191,10 +192,10 @@ class MLUPoolGradOpKernel : public framework::OpKernel<T> {
data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1); data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} }
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm, phi::funcs::UpdatePadding(&paddings, global_pooling, adaptive,
data_dims, strides, ksize); padding_algorithm, data_dims, strides, ksize);
if (global_pooling) { if (global_pooling) {
UpdateKsize(&ksize, data_dims); phi::funcs::UpdateKernelSize(&ksize, data_dims);
} }
// inputs need to be in NHWC layout // inputs need to be in NHWC layout
......
...@@ -11,8 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,8 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h" #include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -68,8 +70,8 @@ class NPUPoolOpKernel : public framework::OpKernel<T> { ...@@ -68,8 +70,8 @@ class NPUPoolOpKernel : public framework::OpKernel<T> {
strides_vec[2] = strides[0]; strides_vec[2] = strides[0];
strides_vec[3] = strides[1]; strides_vec[3] = strides[1];
} }
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm, phi::funcs::UpdatePadding(&paddings, global_pooling, adaptive,
data_dims, strides, ksize); padding_algorithm, data_dims, strides, ksize);
PADDLE_ENFORCE_LT( PADDLE_ENFORCE_LT(
std::max(paddings[0], paddings[1]), ksize[0], std::max(paddings[0], paddings[1]), ksize[0],
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
...@@ -201,8 +203,8 @@ class NPUPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -201,8 +203,8 @@ class NPUPoolGradOpKernel : public framework::OpKernel<T> {
strides_vec[2] = strides[0]; strides_vec[2] = strides[0];
strides_vec[3] = strides[1]; strides_vec[3] = strides[1];
} }
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm, phi::funcs::UpdatePadding(&paddings, global_pooling, adaptive,
data_dims, strides, ksize); padding_algorithm, data_dims, strides, ksize);
PADDLE_ENFORCE_LT( PADDLE_ENFORCE_LT(
std::max(paddings[0], paddings[1]), ksize[0], std::max(paddings[0], paddings[1]), ksize[0],
......
...@@ -8,13 +8,17 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -8,13 +8,17 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
#include <unordered_map> #include <unordered_map>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#ifdef PADDLE_WITH_XPU #ifdef PADDLE_WITH_XPU
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using framework::Tensor;
xpu::Pooling_t XPUPoolingType(const std::string& pooltype, bool exclusive, xpu::Pooling_t XPUPoolingType(const std::string& pooltype, bool exclusive,
bool is_test) { bool is_test) {
if (pooltype == "max") { if (pooltype == "max") {
......
...@@ -12,8 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,8 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/pool_with_index_op.h"
#include <memory> #include <memory>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -28,71 +32,6 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { ...@@ -28,71 +32,6 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::InvalidArgument(
"Input(X) of Pooling should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument(
"Output(Out) of Pooling should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Mask"), true,
platform::errors::InvalidArgument(
"Output(Mask) of Pooling should not be null."));
auto in_x_dims = ctx->GetInputDim("X");
std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
bool adaptive = ctx->Attrs().Get<bool>("adaptive");
PADDLE_ENFORCE(
in_x_dims.size() == 4 || in_x_dims.size() == 5,
platform::errors::InvalidArgument("Pooling intput should be 4-D or 5-D "
"tensor but received %dD-Tensor",
in_x_dims.size()));
if (ctx->Attrs().Get<bool>("global_pooling")) {
ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0;
ksize[i] = static_cast<int>(in_x_dims[i + 2]);
}
}
PADDLE_ENFORCE_EQ(
in_x_dims.size() - ksize.size(), 2U,
platform::errors::InvalidArgument(
"The input size %d minus the kernel size %d should equal to 2.",
in_x_dims.size(), ksize.size()));
PADDLE_ENFORCE_EQ(
ksize.size(), strides.size(),
platform::errors::InvalidArgument(
"Strides size %d and pooling size %d should be the same.",
strides.size(), ksize.size()));
PADDLE_ENFORCE_EQ(
ksize.size(), paddings.size(),
platform::errors::InvalidArgument(
"Paddings size %d and pooling size %d should be the same.",
paddings.size(), ksize.size()));
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
if (adaptive) {
output_shape.insert(output_shape.end(), ksize.begin(), ksize.end());
} else {
for (size_t i = 0; i < ksize.size(); ++i) {
if ((!ctx->IsRuntime()) && (in_x_dims[i + 2] < 0)) {
output_shape.push_back(in_x_dims[i + 2]);
} else {
output_shape.push_back(MaxPoolOutputSize(in_x_dims[i + 2], ksize[i],
paddings[i], strides[i]));
}
}
}
ctx->SetOutputDim("Out", phi::make_ddim(output_shape));
ctx->SetOutputDim("Mask", phi::make_ddim(output_shape));
}
protected: protected:
framework::OpKernelType GetExpectedKernelType( framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override { const framework::ExecutionContext &ctx) const override {
...@@ -106,22 +45,6 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { ...@@ -106,22 +45,6 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput("Mask"), true,
platform::errors::InvalidArgument("Input(Mask) must not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"), true,
platform::errors::InvalidArgument("Input(X) must not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
platform::errors::InvalidArgument(
"Input(Out@GRAD) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::InvalidArgument(
"Output(X@GRAD) should not be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
protected: protected:
framework::OpKernelType GetExpectedKernelType( framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override { const framework::ExecutionContext &ctx) const override {
...@@ -335,40 +258,34 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER( ...@@ -335,40 +258,34 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(
namespace ops = paddle::operators; namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(max_pool2d_with_index,
MaxPool2dWithIndexInferShapeFunctor,
PD_INFER_META(phi::MaxPoolWithIndexInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(max_pool2d_with_index_grad,
MaxPool2dWithIndexGradInferShapeFunctor,
PD_INFER_META(phi::MaxPoolWithIndexGradInferMeta));
REGISTER_OPERATOR(max_pool2d_with_index, ops::MaxPoolWithIndexOp, REGISTER_OPERATOR(max_pool2d_with_index, ops::MaxPoolWithIndexOp,
ops::MaxPool2dWithIndexOpMaker, ops::MaxPool2dWithIndexOpMaker,
ops::MaxPoolWithIndexGradOpMaker<paddle::framework::OpDesc>, ops::MaxPoolWithIndexGradOpMaker<paddle::framework::OpDesc>,
ops::MaxPoolWithIndexGradOpMaker<paddle::imperative::OpBase>); ops::MaxPoolWithIndexGradOpMaker<paddle::imperative::OpBase>,
MaxPool2dWithIndexInferShapeFunctor);
REGISTER_OPERATOR(max_pool2d_with_index_grad, ops::MaxPoolWithIndexOpGrad, REGISTER_OPERATOR(max_pool2d_with_index_grad, ops::MaxPoolWithIndexOpGrad,
ops::MaxPoolWithIndexOpGradNoNeedBufferVarsInferer); ops::MaxPoolWithIndexOpGradNoNeedBufferVarsInferer,
MaxPool2dWithIndexGradInferShapeFunctor);
REGISTER_OP_CPU_KERNEL(
max_pool2d_with_index, DECLARE_INFER_SHAPE_FUNCTOR(max_pool3d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, float, int>, MaxPool3dWithIndexInferShapeFunctor,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, double, PD_INFER_META(phi::MaxPoolWithIndexInferMeta));
int>); DECLARE_INFER_SHAPE_FUNCTOR(max_pool3d_with_index_grad,
REGISTER_OP_CPU_KERNEL( MaxPool3dWithIndexGradInferShapeFunctor,
max_pool2d_with_index_grad, PD_INFER_META(phi::MaxPoolWithIndexGradInferMeta));
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, float,
int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, double,
int>);
REGISTER_OPERATOR(max_pool3d_with_index, ops::MaxPoolWithIndexOp, REGISTER_OPERATOR(max_pool3d_with_index, ops::MaxPoolWithIndexOp,
ops::MaxPool3dWithIndexOpMaker, ops::MaxPool3dWithIndexOpMaker,
ops::MaxPoolWithIndexGradOpMaker<paddle::framework::OpDesc>, ops::MaxPoolWithIndexGradOpMaker<paddle::framework::OpDesc>,
ops::MaxPoolWithIndexGradOpMaker<paddle::imperative::OpBase>); ops::MaxPoolWithIndexGradOpMaker<paddle::imperative::OpBase>,
MaxPool3dWithIndexInferShapeFunctor);
REGISTER_OPERATOR(max_pool3d_with_index_grad, ops::MaxPoolWithIndexOpGrad, REGISTER_OPERATOR(max_pool3d_with_index_grad, ops::MaxPoolWithIndexOpGrad,
ops::MaxPoolWithIndexOpGradNoNeedBufferVarsInferer); ops::MaxPoolWithIndexOpGradNoNeedBufferVarsInferer,
MaxPool3dWithIndexGradInferShapeFunctor);
REGISTER_OP_CPU_KERNEL(
max_pool3d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, float, int>,
ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, double,
int>);
REGISTER_OP_CPU_KERNEL(
max_pool3d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, float,
int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, double,
int>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/pool_with_index_op.h"
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
max_pool2d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::CUDADeviceContext, float,
int>,
ops::MaxPoolWithIndexKernel<paddle::platform::CUDADeviceContext, double,
int>);
REGISTER_OP_CUDA_KERNEL(
max_pool2d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CUDADeviceContext, float,
int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CUDADeviceContext, double,
int>);
REGISTER_OP_CUDA_KERNEL(
max_pool3d_with_index,
ops::MaxPoolWithIndexKernel<paddle::platform::CUDADeviceContext, float,
int>,
ops::MaxPoolWithIndexKernel<paddle::platform::CUDADeviceContext, double,
int>);
REGISTER_OP_CUDA_KERNEL(
max_pool3d_with_index_grad,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CUDADeviceContext, float,
int>,
ops::MaxPoolWithIndexGradKernel<paddle::platform::CUDADeviceContext, double,
int>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T1, typename T2>
class MaxPoolWithIndexKernel : public framework::OpKernel<T1> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* in_x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
Tensor* mask = context.Output<Tensor>("Mask");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
bool adaptive = context.Attr<bool>("adaptive");
auto& dev_ctx = context.template device_context<DeviceContext>();
if (context.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0;
ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
}
}
switch (ksize.size()) {
case 2: {
paddle::operators::math::MaxPool2dWithIndexFunctor<DeviceContext, T1,
T2>
pool2d_forward;
pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, adaptive, out,
mask);
} break;
case 3: {
paddle::operators::math::MaxPool3dWithIndexFunctor<DeviceContext, T1,
T2>
pool3d_forward;
pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, adaptive, out,
mask);
} break;
default: {
PADDLE_THROW(platform::errors::InvalidArgument(
"Pool op only supports 2D and 3D input."));
}
}
}
};
template <typename DeviceContext, typename T1, typename T2>
class MaxPoolWithIndexGradKernel : public framework::OpKernel<T1> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* mask = context.Input<Tensor>("Mask");
const Tensor* out_grad =
context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
bool adaptive = context.Attr<bool>("adaptive");
if (context.Attr<bool>("global_pooling")) {
for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0;
ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2]);
}
}
if (in_x_grad) {
in_x_grad->mutable_data<T1>(context.GetPlace());
auto& device_ctx = context.template device_context<DeviceContext>();
phi::funcs::set_constant(device_ctx, in_x_grad, 0);
switch (ksize.size()) {
case 2: {
paddle::operators::math::MaxPool2dWithIndexGradFunctor<DeviceContext,
T1, T2>
pool2d_backward;
pool2d_backward(device_ctx, *out_grad, *mask, ksize, strides,
paddings, adaptive, in_x_grad);
} break;
case 3: {
paddle::operators::math::MaxPool3dWithIndexGradFunctor<DeviceContext,
T1, T2>
pool3d_backward;
pool3d_backward(device_ctx, *out_grad, *mask, ksize, strides,
paddings, adaptive, in_x_grad);
} break;
default: {
PADDLE_THROW(platform::errors::InvalidArgument(
"Pool op only supports 2D and 3D input."));
}
}
}
}
};
} // namespace operators
} // namespace paddle
...@@ -16,9 +16,10 @@ limitations under the License. */ ...@@ -16,9 +16,10 @@ limitations under the License. */
#include <string> #include <string>
#include <vector> #include <vector>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -53,14 +54,20 @@ class SppKernel : public framework::OpKernel<T> { ...@@ -53,14 +54,20 @@ class SppKernel : public framework::OpKernel<T> {
out_level.mutable_data<T>(output_shape, context.GetPlace()); out_level.mutable_data<T>(output_shape, context.GetPlace());
// pooling // pooling
if (pooling_type == "max") { if (pooling_type == "max") {
math::Pool2dFunctor<DeviceContext, math::MaxPool<T>, T> pool_forward; phi::funcs::Pool2dFunctor<
math::MaxPool<T> max_process; typename framework::ConvertToPhiContext<DeviceContext>::TYPE,
phi::funcs::MaxPool<T>, T>
pool_forward;
phi::funcs::MaxPool<T> max_process;
pool_forward(context.template device_context<DeviceContext>(), *in_x, pool_forward(context.template device_context<DeviceContext>(), *in_x,
kernel_size, strides, paddings, true, false, &out_level, kernel_size, strides, paddings, true, false, &out_level,
max_process); max_process);
} else if (pooling_type == "avg") { } else if (pooling_type == "avg") {
math::Pool2dFunctor<DeviceContext, math::AvgPool<T>, T> pool_forward; phi::funcs::Pool2dFunctor<
math::AvgPool<T> avg_process; typename framework::ConvertToPhiContext<DeviceContext>::TYPE,
phi::funcs::AvgPool<T>, T>
pool_forward;
phi::funcs::AvgPool<T> avg_process;
pool_forward(context.template device_context<DeviceContext>(), *in_x, pool_forward(context.template device_context<DeviceContext>(), *in_x,
kernel_size, strides, paddings, true, false, &out_level, kernel_size, strides, paddings, true, false, &out_level,
avg_process); avg_process);
...@@ -95,7 +102,9 @@ class SppGradKernel : public framework::OpKernel<T> { ...@@ -95,7 +102,9 @@ class SppGradKernel : public framework::OpKernel<T> {
std::string pooling_type = std::string pooling_type =
context.template Attr<std::string>("pooling_type"); context.template Attr<std::string>("pooling_type");
auto& device_ctx = context.template device_context<DeviceContext>(); auto& device_ctx = context.template device_context<DeviceContext>();
phi::funcs::SetConstant<DeviceContext, T> zero; phi::funcs::SetConstant<
typename framework::ConvertToPhiContext<DeviceContext>::TYPE, T>
zero;
in_x_grad->mutable_data<T>(context.GetPlace()); in_x_grad->mutable_data<T>(context.GetPlace());
zero(device_ctx, in_x_grad, static_cast<T>(0)); zero(device_ctx, in_x_grad, static_cast<T>(0));
auto out_stride = phi::stride(out->dims()); auto out_stride = phi::stride(out->dims());
...@@ -145,14 +154,18 @@ class SppGradKernel : public framework::OpKernel<T> { ...@@ -145,14 +154,18 @@ class SppGradKernel : public framework::OpKernel<T> {
outgrad_level.Resize(out_shape); outgrad_level.Resize(out_shape);
// pooling backward // pooling backward
if (pooling_type == "max") { if (pooling_type == "max") {
math::MaxPool2dGradFunctor<DeviceContext, T> pool2d_backward; phi::funcs::MaxPool2dGradFunctor<
typename framework::ConvertToPhiContext<DeviceContext>::TYPE, T>
pool2d_backward;
pool2d_backward(context.template device_context<DeviceContext>(), *in_x, pool2d_backward(context.template device_context<DeviceContext>(), *in_x,
*&out_level, *&outgrad_level, kernel_size, strides, *&out_level, *&outgrad_level, kernel_size, strides,
paddings, in_x_grad); paddings, in_x_grad);
} else if (pooling_type == "avg") { } else if (pooling_type == "avg") {
math::Pool2dGradFunctor<DeviceContext, math::AvgPoolGrad<T>, T> phi::funcs::Pool2dGradFunctor<
typename framework::ConvertToPhiContext<DeviceContext>::TYPE,
phi::funcs::AvgPoolGrad<T>, T>
pool_backward; pool_backward;
math::AvgPoolGrad<T> avg_process; phi::funcs::AvgPoolGrad<T> avg_process;
pool_backward(context.template device_context<DeviceContext>(), *in_x, pool_backward(context.template device_context<DeviceContext>(), *in_x,
*&out_level, *&outgrad_level, kernel_size, strides, *&out_level, *&outgrad_level, kernel_size, strides,
paddings, true, false, in_x_grad, avg_process); paddings, true, false, in_x_grad, avg_process);
......
...@@ -17,7 +17,6 @@ limitations under the License. */ ...@@ -17,7 +17,6 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function.h"
......
...@@ -16,7 +16,6 @@ limitations under the License. */ ...@@ -16,7 +16,6 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/operators/utils.h" #include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/blas/blas.h"
......
...@@ -26,11 +26,13 @@ namespace phi { ...@@ -26,11 +26,13 @@ namespace phi {
// TODO(chenweihang): add other flags if needed // TODO(chenweihang): add other flags if needed
struct MetaConfig { struct MetaConfig {
bool is_runtime{true}; bool is_runtime{true};
bool is_run_mkldnn_kernel{false};
MetaConfig() = default; MetaConfig() = default;
// supporting implicit construction is easier to use // supporting implicit construction is easier to use
MetaConfig(bool is_runtime) : is_runtime(is_runtime) {} // NOLINT MetaConfig(bool is_runtime, bool is_run_mkldnn_kernel)
: is_runtime(is_runtime),
is_run_mkldnn_kernel(is_run_mkldnn_kernel) {} // NOLINT
}; };
class MetaTensor { class MetaTensor {
......
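With the extra is_run_mkldnn_kernel flag (see the MetaConfig struct above), InferMeta callers that need non-default behaviour construct the config with both booleans. A minimal, hypothetical usage sketch follows; the variable names (x_meta, out_meta, kernel_size, ...) are invented for illustration and are not taken from this patch:

  phi::MetaConfig cfg(/*is_runtime=*/true, /*is_run_mkldnn_kernel=*/false);
  phi::PoolInferMeta(x_meta, kernel_size, strides, paddings, ceil_mode,
                     exclusive, data_format, pooling_type, global_pooling,
                     adaptive, padding_algorithm, &out_meta, cfg);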
...@@ -122,6 +122,35 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out, ...@@ -122,6 +122,35 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
dx->share_meta(dout); dx->share_meta(dout);
} }
void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
const MetaTensor& mask,
const MetaTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
MetaTensor* dx) {
dx->share_meta(x);
}
void PoolGradInferMeta(const MetaTensor& x,
const MetaTensor& out,
const MetaTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
MetaTensor* dx) {
dx->share_meta(x);
}
void PsroiPoolGradInferMeta(const MetaTensor& x, void PsroiPoolGradInferMeta(const MetaTensor& x,
const MetaTensor& rois, const MetaTensor& rois,
paddle::optional<const MetaTensor&> rois_num, paddle::optional<const MetaTensor&> rois_num,
......
...@@ -54,6 +54,16 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out, ...@@ -54,6 +54,16 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
int axis, int axis,
MetaTensor* dx); MetaTensor* dx);
void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
const MetaTensor& mask,
const MetaTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
MetaTensor* dx);
void PsroiPoolGradInferMeta(const MetaTensor& x, void PsroiPoolGradInferMeta(const MetaTensor& x,
const MetaTensor& rois, const MetaTensor& rois,
paddle::optional<const MetaTensor&> rois_num, paddle::optional<const MetaTensor&> rois_num,
...@@ -64,6 +74,21 @@ void PsroiPoolGradInferMeta(const MetaTensor& x, ...@@ -64,6 +74,21 @@ void PsroiPoolGradInferMeta(const MetaTensor& x,
float spatial_scale, float spatial_scale,
MetaTensor* dx); MetaTensor* dx);
void PoolGradInferMeta(const MetaTensor& x,
const MetaTensor& out,
const MetaTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
MetaTensor* dx);
void ScatterGradInferMeta(const MetaTensor& index, void ScatterGradInferMeta(const MetaTensor& index,
const MetaTensor& updates, const MetaTensor& updates,
const MetaTensor& out_grad, const MetaTensor& out_grad,
......
...@@ -22,6 +22,7 @@ limitations under the License. */ ...@@ -22,6 +22,7 @@ limitations under the License. */
#include "paddle/phi/common/type_traits.h" #include "paddle/phi/common/type_traits.h"
#include "paddle/phi/core/enforce.h" #include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/funcs/unfold_functor.h" #include "paddle/phi/kernels/funcs/unfold_functor.h"
namespace phi { namespace phi {
...@@ -553,6 +554,78 @@ void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) { ...@@ -553,6 +554,78 @@ void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) {
out->set_dtype(DataType::BOOL); out->set_dtype(DataType::BOOL);
} }
void MaxPoolWithIndexInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
MetaTensor* out,
MetaTensor* mask,
MetaConfig config) {
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
auto x_dims = x.dims();
PADDLE_ENFORCE(
x_dims.size() == 4 || x_dims.size() == 5,
errors::InvalidArgument(
"Pooling intput should be 4-D or 5-D tensor but received %dD-Tensor",
x_dims.size()));
if (global_pooling) {
kernel_size_.resize(static_cast<size_t>(x_dims.size()) - 2);
for (size_t i = 0; i < kernel_size_.size(); ++i) {
paddings_[i] = 0;
kernel_size_[i] = static_cast<int>(x_dims[i + 2]);
}
}
PADDLE_ENFORCE_EQ(
x_dims.size() - kernel_size_.size(),
2U,
errors::InvalidArgument(
"The input size %d minus the kernel size %d should equal to 2.",
x_dims.size(),
kernel_size_.size()));
PADDLE_ENFORCE_EQ(
kernel_size_.size(),
strides.size(),
errors::InvalidArgument(
"Strides size %d and pooling size %d should be the same.",
strides.size(),
kernel_size_.size()));
PADDLE_ENFORCE_EQ(
kernel_size_.size(),
paddings_.size(),
errors::InvalidArgument(
"Paddings size %d and pooling size %d should be the same.",
paddings_.size(),
kernel_size_.size()));
std::vector<int64_t> output_shape({x_dims[0], x_dims[1]});
if (adaptive) {
output_shape.insert(
output_shape.end(), kernel_size_.begin(), kernel_size_.end());
} else {
for (size_t i = 0; i < kernel_size_.size(); ++i) {
if ((!config.is_runtime) && (x_dims[i + 2] < 0)) {
output_shape.push_back(x_dims[i + 2]);
} else {
output_shape.push_back(funcs::MaxPoolOutputSize(
x_dims[i + 2], kernel_size_[i], paddings_[i], strides[i]));
}
}
}
out->set_dims(make_ddim(output_shape));
out->set_dtype(x.dtype());
mask->set_dims(make_ddim(output_shape));
mask->set_dtype(paddle::experimental::CppTypeToDataType<int>::Type());
}
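A quick sanity check of the shape arithmetic above, assuming funcs::MaxPoolOutputSize follows the usual (input - kernel + 2 * padding) / stride + 1 rule (illustrative values, not from this patch):

  // NCHW input of shape [2, 3, 32, 32], kernel_size = {2, 2},
  // strides = {2, 2}, paddings = {0, 0}, adaptive = false:
  //   (32 - 2 + 2 * 0) / 2 + 1 = 16
  // so Out and Mask both resolve to [2, 3, 16, 16], with Mask pinned to int.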
void MultinomialInferMeta(const MetaTensor& x, void MultinomialInferMeta(const MetaTensor& x,
int num_samples, int num_samples,
bool replacement, bool replacement,
...@@ -675,6 +748,118 @@ void PixelShuffleInferMeta(const MetaTensor& x, ...@@ -675,6 +748,118 @@ void PixelShuffleInferMeta(const MetaTensor& x,
out->set_dims(output_dims); out->set_dims(output_dims);
} }
void PoolInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
MetaTensor* out,
MetaConfig config) {
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
auto x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() == 4 || x_dims.size() == 5,
true,
errors::InvalidArgument(
"the input of Op(pool) should be 4-D or 5-D Tensor. But "
"received: %u-D Tensor and it's shape is [%s].",
x_dims.size(),
x_dims));
PADDLE_ENFORCE_EQ(x_dims.size() - kernel_size_.size(),
2U,
errors::InvalidArgument(
"the dimension of input minus the size of "
"Attr(kernel_size_) must be euqal to 2 in Op(pool). "
"But received: the dimension of input minus the size "
"of Attr(kernel_size_) is %d, the "
"input's dimension is %d, the shape of input "
"is [%s], the Attr(kernel_size_)'s size is %d, the "
"Attr(kernel_size_) is [%s].",
x_dims.size() - kernel_size_.size(),
x_dims.size(),
x_dims,
kernel_size_.size(),
make_ddim(kernel_size_)));
PADDLE_ENFORCE_EQ(
kernel_size_.size(),
strides.size(),
errors::InvalidArgument(
"the size of Attr(kernel_size_) and Attr(strides) in "
"Op(pool) must be equal. "
"But received: Attr(kernel_size_)'s size is %d, Attr(strides)'s "
"size is %d, Attr(kernel_size_) is [%s], Attr(strides)is [%s].",
kernel_size_.size(),
strides.size(),
make_ddim(kernel_size_),
make_ddim(strides)));
  // MKL-DNN kernels describe dims in NCHW order,
  // so data_format is ignored when an MKL-DNN kernel runs
const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
(data_format == "NHWC" || data_format == "NDHWC");
// update paddings if "SAME" or global_pooling
DDim data_dims;
if (channel_last) {
data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
} else {
data_dims = slice_ddim(x_dims, 2, x_dims.size());
}
funcs::UpdatePadding(&paddings_,
global_pooling,
adaptive,
padding_algorithm,
data_dims,
strides,
kernel_size_);
if (global_pooling) {
funcs::UpdateKernelSize(&kernel_size_, data_dims);
}
std::vector<int64_t> output_shape;
if (adaptive) {
output_shape.insert(
output_shape.end(), kernel_size_.begin(), kernel_size_.end());
} else {
for (int i = 0; i < data_dims.size(); ++i) {
if ((!config.is_runtime) && (data_dims[i] < 0)) {
output_shape.push_back(data_dims[i]);
} else {
output_shape.push_back(funcs::PoolOutputSize(data_dims[i],
kernel_size_[i],
paddings_[2 * i],
paddings_[2 * i + 1],
strides[i],
ceil_mode));
}
}
}
// output_N = input_N
output_shape.insert(output_shape.begin(), x_dims[0]);
// output_C = input_C
if (channel_last) {
output_shape.push_back(x_dims[x_dims.size() - 1]);
} else {
output_shape.insert(output_shape.begin() + 1, x_dims[1]);
}
out->set_dims(make_ddim(output_shape));
out->share_lod(x);
out->set_dtype(x.dtype());
}
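For reference, a minimal standalone sketch of the output-size rule that PoolInferMeta leans on through funcs::PoolOutputSize; the formula below is the conventional one and is stated here as an assumption, not copied from the patch:

  // Assumed behaviour of the non-adaptive output-size computation.
  // ceil_mode pads the numerator by stride - 1 so that a partially covered
  // window still contributes one output element.
  inline int PoolOutputSizeSketch(int input_size, int filter_size,
                                  int padding_1, int padding_2, int stride,
                                  bool ceil_mode) {
    int numerator = input_size - filter_size + padding_1 + padding_2;
    if (ceil_mode) numerator += stride - 1;
    return numerator / stride + 1;
  }
  // e.g. input 7, filter 2, paddings 0/0, stride 2:
  // floor mode gives 3, ceil mode gives 4.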
void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) { void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) {
out->set_dims(x.dims()); out->set_dims(x.dims());
out->set_dtype(dtype::ToReal(x.dtype())); out->set_dtype(dtype::ToReal(x.dtype()));
......
...@@ -98,6 +98,16 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out); ...@@ -98,6 +98,16 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);
void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out); void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
void MaxPoolWithIndexInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
MetaTensor* out,
MetaTensor* mask,
MetaConfig config = MetaConfig());
void MultinomialInferMeta(const MetaTensor& x, void MultinomialInferMeta(const MetaTensor& x,
int num_samples, int num_samples,
bool replacement, bool replacement,
...@@ -114,6 +124,20 @@ void PixelShuffleInferMeta(const MetaTensor& x, ...@@ -114,6 +124,20 @@ void PixelShuffleInferMeta(const MetaTensor& x,
const std::string& data_format, const std::string& data_format,
MetaTensor* out); MetaTensor* out);
void PoolInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
MetaTensor* out,
MetaConfig config = MetaConfig());
void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out); void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);
void ReduceInferMeta(const MetaTensor& x, void ReduceInferMeta(const MetaTensor& x,
......
...@@ -11,7 +11,7 @@ set_property(GLOBAL PROPERTY PHI_KERNELS "") ...@@ -11,7 +11,7 @@ set_property(GLOBAL PROPERTY PHI_KERNELS "")
# [ 1. Common kernel compilation dependencies ] # [ 1. Common kernel compilation dependencies ]
set(COMMON_KERNEL_DEPS dense_tensor sparse_coo_tensor sparse_csr_tensor kernel_context kernel_factory arg_map_context convert_utils lod_utils custom_kernel) set(COMMON_KERNEL_DEPS dense_tensor sparse_coo_tensor sparse_csr_tensor kernel_context kernel_factory arg_map_context convert_utils lod_utils custom_kernel)
set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} eigen_function blas math_function im2col vol2col concat_and_split_functor softmax) set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} eigen_function blas math_function im2col vol2col concat_and_split_functor)
# remove this dep after removing fluid deps on tensor creation # remove this dep after removing fluid deps on tensor creation
set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} phi_api_utils) set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} phi_api_utils)
set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} infermeta) set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} infermeta)
...@@ -27,22 +27,25 @@ kernel_library(full_kernel DEPS ${COMMON_KERNEL_DEPS} empty_kernel) ...@@ -27,22 +27,25 @@ kernel_library(full_kernel DEPS ${COMMON_KERNEL_DEPS} empty_kernel)
# Some kernels depend on some targets that are not commonly used. # Some kernels depend on some targets that are not commonly used.
# These targets are not suitable for common dependencies. # These targets are not suitable for common dependencies.
# In this case, you need to manually generate them here. # In this case, you need to manually generate them here.
set(MANUAL_BUILD_KERNELS math_kernel softmax_kernel softmax_grad_kernel triangular_solve_grad_kernel maxout_kernel maxout_grad_kernel put_along_axis_kernel put_along_axis_grad_kernel take_along_axis_kernel take_along_axis_grad_kernel eigh_kernel segment_pool_kernel segment_pool_grad_kernel matrix_power_kernel matrix_power_grad_kernel) set(MANUAL_BUILD_KERNELS eigh_kernel gumbel_softmax_kernel gumbel_softmax_grad_kernel math_kernel matrix_power_kernel matrix_power_grad_kernel maxout_kernel maxout_grad_kernel pool_kernel put_along_axis_kernel put_along_axis_grad_kernel segment_pool_kernel segment_pool_grad_kernel softmax_kernel softmax_grad_kernel take_along_axis_kernel take_along_axis_grad_kernel triangular_solve_grad_kernel)
kernel_library(eigh_kernel DEPS ${COMMON_KERNEL_DEPS} lapack_function)
kernel_library(gumbel_softmax_kernel DEPS ${COMMON_KERNEL_DEPS} softmax)
kernel_library(gumbel_softmax_grad_kernel DEPS ${COMMON_KERNEL_DEPS} softmax)
kernel_library(math_kernel DEPS ${COMMON_KERNEL_DEPS} cast_kernel copy_kernel) kernel_library(math_kernel DEPS ${COMMON_KERNEL_DEPS} cast_kernel copy_kernel)
kernel_library(softmax_kernel DEPS ${COMMON_KERNEL_DEPS} softmax) kernel_library(matrix_power_kernel DEPS ${COMMON_KERNEL_DEPS} matrix_inverse)
kernel_library(softmax_grad_kernel DEPS ${COMMON_KERNEL_DEPS} softmax) kernel_library(matrix_power_grad_kernel DEPS ${COMMON_KERNEL_DEPS} matrix_inverse)
kernel_library(triangular_solve_grad_kernel DEPS ${COMMON_KERNEL_DEPS} matrix_reduce)
kernel_library(maxout_kernel DEPS ${COMMON_KERNEL_DEPS} maxouting) kernel_library(maxout_kernel DEPS ${COMMON_KERNEL_DEPS} maxouting)
kernel_library(maxout_grad_kernel DEPS ${COMMON_KERNEL_DEPS} maxouting) kernel_library(maxout_grad_kernel DEPS ${COMMON_KERNEL_DEPS} maxouting)
kernel_library(pool_kernel DEPS ${COMMON_KERNEL_DEPS} pooling)
kernel_library(put_along_axis_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel) kernel_library(put_along_axis_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel)
kernel_library(put_along_axis_grad_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel) kernel_library(put_along_axis_grad_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel)
kernel_library(take_along_axis_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel)
kernel_library(take_along_axis_grad_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel)
kernel_library(matrix_power_kernel DEPS ${COMMON_KERNEL_DEPS} matrix_inverse)
kernel_library(matrix_power_grad_kernel DEPS ${COMMON_KERNEL_DEPS} matrix_inverse)
kernel_library(eigh_kernel DEPS ${COMMON_KERNEL_DEPS} lapack_function)
kernel_library(segment_pool_kernel DEPS ${COMMON_KERNEL_DEPS} segment_pooling) kernel_library(segment_pool_kernel DEPS ${COMMON_KERNEL_DEPS} segment_pooling)
kernel_library(segment_pool_grad_kernel DEPS ${COMMON_KERNEL_DEPS} segment_pooling) kernel_library(segment_pool_grad_kernel DEPS ${COMMON_KERNEL_DEPS} segment_pooling)
kernel_library(softmax_kernel DEPS ${COMMON_KERNEL_DEPS} softmax)
kernel_library(softmax_grad_kernel DEPS ${COMMON_KERNEL_DEPS} softmax)
kernel_library(take_along_axis_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel)
kernel_library(take_along_axis_grad_kernel DEPS ${COMMON_KERNEL_DEPS} gather_scatter_kernel)
kernel_library(triangular_solve_grad_kernel DEPS ${COMMON_KERNEL_DEPS} matrix_reduce)
# 4. auto parse and build kernel targets by cmake # 4. auto parse and build kernel targets by cmake
register_kernels(EXCLUDES ${COMMON_BAISC_KERNELS} ${MANUAL_BUILD_KERNELS} DEPS ${COMMON_KERNEL_DEPS} ${COMMON_BAISC_KERNELS} ) register_kernels(EXCLUDES ${COMMON_BAISC_KERNELS} ${MANUAL_BUILD_KERNELS} DEPS ${COMMON_KERNEL_DEPS} ${COMMON_BAISC_KERNELS} )
......
...@@ -40,7 +40,7 @@ DenseTensor Concat(const Context& dev_ctx, ...@@ -40,7 +40,7 @@ DenseTensor Concat(const Context& dev_ctx,
DenseTensor dense_out; DenseTensor dense_out;
MetaTensor meta_out(&dense_out); MetaTensor meta_out(&dense_out);
ConcatInferMeta(meta_x_ptr, axis.to<int>(), &meta_out, /*is_runtime=*/true); ConcatInferMeta(meta_x_ptr, axis.to<int>(), &meta_out);
ConcatKernel<T, Context>(dev_ctx, x, axis, &dense_out); ConcatKernel<T, Context>(dev_ctx, x, axis, &dense_out);
return dense_out; return dense_out;
} }
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pool_grad_kernel.h"
#include "paddle/phi/kernels/impl/pool_grad_kernel_impl.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(
pool2d_grad, CPU, ALL_LAYOUT, phi::Pool2dGradKernel, float, double) {}
PD_REGISTER_KERNEL(pool2d_double_grad,
CPU,
ALL_LAYOUT,
phi::Pool2dDoubleGradKernel,
float,
double) {}
PD_REGISTER_KERNEL(max_pool2d_with_index_grad,
CPU,
ALL_LAYOUT,
phi::MaxPool2dWithIndexGradKernel,
float,
double) {
kernel->InputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
PD_REGISTER_KERNEL(
pool3d_grad, CPU, ALL_LAYOUT, phi::Pool3dGradKernel, float, double) {}
PD_REGISTER_KERNEL(max_pool3d_with_index_grad,
CPU,
ALL_LAYOUT,
phi::MaxPool3dWithIndexGradKernel,
float,
double) {
kernel->InputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pool_kernel.h"
#include "paddle/phi/kernels/impl/pool_kernel_impl.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(pool2d, CPU, ALL_LAYOUT, phi::Pool2dKernel, float, double) {}
PD_REGISTER_KERNEL(max_pool2d_with_index,
CPU,
ALL_LAYOUT,
phi::MaxPool2dWithIndexKernel,
float,
double) {
kernel->OutputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
PD_REGISTER_KERNEL(pool3d, CPU, ALL_LAYOUT, phi::Pool3dKernel, float, double) {}
PD_REGISTER_KERNEL(max_pool3d_with_index,
CPU,
ALL_LAYOUT,
phi::MaxPool3dWithIndexKernel,
float,
double) {
kernel->OutputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
...@@ -38,7 +38,7 @@ void SplitKernel(const Context& dev_ctx, ...@@ -38,7 +38,7 @@ void SplitKernel(const Context& dev_ctx,
out_metas_ptr.push_back(&out_metas.back()); out_metas_ptr.push_back(&out_metas.back());
} }
phi::SplitInferMeta(x, num_or_sections, axis_scalar, out_metas_ptr, true); phi::SplitInferMeta(x, num_or_sections, axis_scalar, out_metas_ptr);
for (size_t i = 0; i < out_metas.size(); ++i) { for (size_t i = 0; i < out_metas.size(); ++i) {
outs[i]->Resize(out_metas[i].dims()); outs[i]->Resize(out_metas[i].dims());
......
...@@ -3,11 +3,12 @@ add_subdirectory(blas) ...@@ -3,11 +3,12 @@ add_subdirectory(blas)
add_subdirectory(lapack) add_subdirectory(lapack)
add_subdirectory(detail) add_subdirectory(detail)
math_library(math_function DEPS blas dense_tensor tensor) math_library(concat_and_split_functor DEPS dense_tensor)
math_library(segment_pooling)
math_library(sequence2batch)
math_library(gru_compute DEPS activation_functions math_function) math_library(gru_compute DEPS activation_functions math_function)
math_library(lstm_compute DEPS activation_functions) math_library(lstm_compute DEPS activation_functions)
math_library(concat_and_split_functor DEPS dense_tensor) math_library(math_function DEPS blas dense_tensor tensor)
math_library(matrix_reduce DEPS dense_tensor) math_library(matrix_reduce DEPS dense_tensor)
math_library(matrix_inverse DEPS dense_tensor eigen3 blas) math_library(matrix_inverse DEPS dense_tensor eigen3 blas)
math_library(pooling DEPS dense_tensor)
math_library(segment_pooling)
math_library(sequence2batch)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
...@@ -11,11 +11,15 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,11 +11,15 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/pooling.h"
namespace paddle { #include "paddle/phi/kernels/funcs/pooling.h"
namespace operators {
namespace math { #include <algorithm>
#include <vector>
#include "paddle/phi/backends/cpu/cpu_context.h"
namespace phi {
namespace funcs {
/* /*
* Tensors are in NCHW or NHWC format. * Tensors are in NCHW or NHWC format.
...@@ -25,13 +29,16 @@ namespace math { ...@@ -25,13 +29,16 @@ namespace math {
* height_down, width_left and width_right, respectively. * height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> { class Pool2dFunctor<CPUContext, PoolProcess, T> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* output, bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) { PoolProcess pool_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_height = input.dims()[2]; const int input_height = input.dims()[2];
...@@ -50,7 +57,7 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -50,7 +57,7 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const int output_stride = output_height * output_width; const int output_stride = output_height * output_width;
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
int wstart = 0, wend = 1; int wstart = 0, wend = 1;
...@@ -101,12 +108,16 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -101,12 +108,16 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
} }
} }
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* output, PoolProcess pool_process) { bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC"); bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
...@@ -131,7 +142,7 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -131,7 +142,7 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const int padding_width = paddings[1]; const int padding_width = paddings[1];
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
int wstart = 0, wend = 1; int wstart = 0, wend = 1;
...@@ -244,14 +255,19 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -244,14 +255,19 @@ class Pool2dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
* height_down, width_left and width_right, respectively. * height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, class T> template <typename PoolProcess, class T>
class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> { class Pool2dGradFunctor<CPUContext, PoolProcess, T> {
public: public:
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, bool exclusive, bool adaptive, const std::vector<int>& ksize,
framework::Tensor* input_grad, PoolProcess pool_grad_process) { const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_grad_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_height = input.dims()[2]; const int input_height = input.dims()[2];
const int input_width = input.dims()[3]; const int input_width = input.dims()[3];
...@@ -270,7 +286,7 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -270,7 +286,7 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
int wstart = 0, wend = 1; int wstart = 0, wend = 1;
...@@ -324,12 +340,17 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -324,12 +340,17 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
} }
} }
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, const std::string data_format, const std::vector<int>& ksize,
bool exclusive, bool adaptive, framework::Tensor* input_grad, const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_grad_process) { PoolProcess pool_grad_process) {
bool channel_last = (data_format == "NHWC"); bool channel_last = (data_format == "NHWC");
...@@ -357,7 +378,7 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -357,7 +378,7 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
int wstart = 0, wend = 1; int wstart = 0, wend = 1;
...@@ -451,9 +472,10 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> { ...@@ -451,9 +472,10 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
h * input_width * input_channels + w * input_channels + c; h * input_width * input_channels + w * input_channels + c;
auto output_idx = ph * output_width * output_channels + auto output_idx = ph * output_width * output_channels +
pw * output_channels + c; pw * output_channels + c;
pool_grad_process.compute( pool_grad_process.compute(input_data[input_idx],
input_data[input_idx], output_data[output_idx], output_data[output_idx],
output_grad_data[output_idx], static_cast<T>(scale), output_grad_data[output_idx],
static_cast<T>(scale),
input_grad_data + input_idx); input_grad_data + input_idx);
} }
} }
@@ -477,13 +499,16 @@ class Pool2dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
* height_down, width_left and width_right, respectively. * height_down, width_left and width_right, respectively.
*/ */
template <class T> template <class T>
class MaxPool2dGradFunctor<platform::CPUDeviceContext, T> { class MaxPool2dGradFunctor<CPUContext, T> {
public: public:
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, framework::Tensor* input_grad) { const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
DenseTensor* input_grad) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_height = input.dims()[2]; const int input_height = input.dims()[2];
const int input_width = input.dims()[3]; const int input_width = input.dims()[3];
@@ -502,7 +527,7 @@ class MaxPool2dGradFunctor<platform::CPUDeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
for (int i = 0; i < batch_size; i++) { for (int i = 0; i < batch_size; i++) {
for (int c = 0; c < output_channels; ++c) { for (int c = 0; c < output_channels; ++c) {
@@ -536,12 +561,15 @@ class MaxPool2dGradFunctor<platform::CPUDeviceContext, T> {
} }
} }
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, const std::string data_format, const std::vector<int>& ksize,
framework::Tensor* input_grad) { const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NHWC"); bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
@@ -568,7 +596,7 @@ class MaxPool2dGradFunctor<platform::CPUDeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
if (!channel_last) { if (!channel_last) {
const int input_stride = input_height * input_width; const int input_stride = input_height * input_width;
@@ -641,29 +669,17 @@ class MaxPool2dGradFunctor<platform::CPUDeviceContext, T> {
} }
} }
}; };
template class MaxPool2dGradFunctor<platform::CPUDeviceContext, float>; template class MaxPool2dGradFunctor<CPUContext, float>;
template class MaxPool2dGradFunctor<platform::CPUDeviceContext, double>; template class MaxPool2dGradFunctor<CPUContext, double>;
template class Pool2dFunctor<platform::CPUDeviceContext, template class Pool2dFunctor<CPUContext, MaxPool<float>, float>;
paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<CPUContext, AvgPool<float>, float>;
template class Pool2dFunctor<platform::CPUDeviceContext, template class Pool2dGradFunctor<CPUContext, MaxPoolGrad<float>, float>;
paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<CPUContext, AvgPoolGrad<float>, float>;
template class Pool2dGradFunctor<platform::CPUDeviceContext, template class Pool2dFunctor<CPUContext, MaxPool<double>, double>;
paddle::operators::math::MaxPoolGrad<float>, template class Pool2dFunctor<CPUContext, AvgPool<double>, double>;
float>; template class Pool2dGradFunctor<CPUContext, MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<platform::CPUDeviceContext, template class Pool2dGradFunctor<CPUContext, AvgPoolGrad<double>, double>;
paddle::operators::math::AvgPoolGrad<float>,
float>;
template class Pool2dFunctor<platform::CPUDeviceContext,
paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CPUDeviceContext,
paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CPUDeviceContext,
paddle::operators::math::MaxPoolGrad<double>,
double>;
template class Pool2dGradFunctor<platform::CPUDeviceContext,
paddle::operators::math::AvgPoolGrad<double>,
double>;
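The explicit instantiations above pin down which CPU 2-D pooling functors are built under phi::funcs. A minimal calling sketch, assuming the functor, MaxPool, and CPUContext keep the names used in these instantiations and that the caller already holds an initialized phi::CPUContext plus correctly shaped DenseTensors (their construction is outside this patch and is not shown):

#include <vector>
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/pooling.h"

// Sketch only: drives the migrated 2-D max-pool functor on CPU for float data.
// The ksize/strides/paddings values are illustrative, not taken from the patch.
void RunMaxPool2dCPU(const phi::CPUContext& ctx,
                     const phi::DenseTensor& x,  // NCHW input
                     phi::DenseTensor* out) {    // pre-sized NCHW output
  std::vector<int> ksize{2, 2}, strides{2, 2}, paddings{0, 0};
  phi::funcs::Pool2dFunctor<phi::CPUContext, phi::funcs::MaxPool<float>, float>
      pool2d;
  pool2d(ctx, x, ksize, strides, paddings, /*exclusive=*/true,
         /*adaptive=*/false, out, phi::funcs::MaxPool<float>());
}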
/* /*
* Tensors are in NCDHW or NDHWC format. * Tensors are in NCDHW or NDHWC format.
@@ -674,13 +690,16 @@ template class Pool2dGradFunctor<platform::CPUDeviceContext,
* height_up, height_down, width_left and width_right, respectively. * height_up, height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, class T> template <typename PoolProcess, class T>
class Pool3dFunctor<platform::CPUDeviceContext, PoolProcess, T> { class Pool3dFunctor<CPUContext, PoolProcess, T> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* output, bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) { PoolProcess pool_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2]; const int input_depth = input.dims()[2];
@@ -704,7 +723,7 @@ class Pool3dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const int output_stride = output_depth * output_height * output_width; const int output_stride = output_depth * output_height * output_width;
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int dstart = 0, dend = 1; int dstart = 0, dend = 1;
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
@@ -771,12 +790,16 @@ class Pool3dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
} }
} }
} }
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* output, PoolProcess pool_process) { bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC"); bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
@@ -807,7 +830,7 @@ class Pool3dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const int padding_width = paddings[2]; const int padding_width = paddings[2];
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int dstart = 0, dend = 1; int dstart = 0, dend = 1;
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
@@ -966,14 +989,19 @@ class Pool3dFunctor<platform::CPUDeviceContext, PoolProcess, T> {
* height_up, height_down, width_left and width_right, respectively. * height_up, height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, class T> template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> { class Pool3dGradFunctor<CPUContext, PoolProcess, T> {
public: public:
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, bool exclusive, bool adaptive, const std::vector<int>& ksize,
framework::Tensor* input_grad, PoolProcess pool_grad_process) { const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_grad_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2]; const int input_depth = input.dims()[2];
const int input_height = input.dims()[3]; const int input_height = input.dims()[3];
@@ -997,7 +1025,7 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int dstart = 0, dend = 1; int dstart = 0, dend = 1;
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
@@ -1051,9 +1079,10 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
int input_idx = (d * input_height + h) * input_width + w; int input_idx = (d * input_height + h) * input_width + w;
int output_idx = int output_idx =
(pd * output_height + ph) * output_width + pw; (pd * output_height + ph) * output_width + pw;
pool_grad_process.compute( pool_grad_process.compute(input_data[input_idx],
input_data[input_idx], output_data[output_idx], output_data[output_idx],
output_grad_data[output_idx], static_cast<T>(scale), output_grad_data[output_idx],
static_cast<T>(scale),
input_grad_data + input_idx); input_grad_data + input_idx);
} }
} }
@@ -1068,12 +1097,17 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
} }
} }
} }
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, const std::string data_format, const std::vector<int>& ksize,
bool exclusive, bool adaptive, framework::Tensor* input_grad, const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_grad_process) { PoolProcess pool_grad_process) {
bool channel_last = (data_format == "NDHWC"); bool channel_last = (data_format == "NDHWC");
@@ -1105,7 +1139,7 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int dstart = 0, dend = 1; int dstart = 0, dend = 1;
int hstart = 0, hend = 1; int hstart = 0, hend = 1;
@@ -1164,9 +1198,10 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
int input_idx = (d * input_height + h) * input_width + w; int input_idx = (d * input_height + h) * input_width + w;
int output_idx = int output_idx =
(pd * output_height + ph) * output_width + pw; (pd * output_height + ph) * output_width + pw;
pool_grad_process.compute( pool_grad_process.compute(input_data[input_idx],
input_data[input_idx], output_data[output_idx], output_data[output_idx],
output_grad_data[output_idx], static_cast<T>(scale), output_grad_data[output_idx],
static_cast<T>(scale),
input_grad_data + input_idx); input_grad_data + input_idx);
} }
} }
@@ -1241,9 +1276,10 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
((pd * output_height + ph) * output_width + pw) * ((pd * output_height + ph) * output_width + pw) *
output_channels + output_channels +
c; c;
pool_grad_process.compute( pool_grad_process.compute(input_data[input_idx],
input_data[input_idx], output_data[output_idx], output_data[output_idx],
output_grad_data[output_idx], static_cast<T>(scale), output_grad_data[output_idx],
static_cast<T>(scale),
input_grad_data + input_idx); input_grad_data + input_idx);
} }
} }
@@ -1270,13 +1306,16 @@ class Pool3dGradFunctor<platform::CPUDeviceContext, PoolProcess, T> {
* height_up, height_down, width_left and width_right, respectively. * height_up, height_down, width_left and width_right, respectively.
*/ */
template <class T> template <class T>
class MaxPool3dGradFunctor<platform::CPUDeviceContext, T> { class MaxPool3dGradFunctor<CPUContext, T> {
public: public:
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, framework::Tensor* input_grad) { const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
DenseTensor* input_grad) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2]; const int input_depth = input.dims()[2];
const int input_height = input.dims()[3]; const int input_height = input.dims()[3];
@@ -1300,7 +1339,7 @@ class MaxPool3dGradFunctor<platform::CPUDeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
for (int i = 0; i < batch_size; i++) { for (int i = 0; i < batch_size; i++) {
for (int c = 0; c < output_channels; ++c) { for (int c = 0; c < output_channels; ++c) {
@@ -1342,12 +1381,15 @@ class MaxPool3dGradFunctor<platform::CPUDeviceContext, T> {
} }
} }
} }
void operator()( void operator()(const CPUContext& context,
const platform::CPUDeviceContext& context, const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const framework::Tensor& output_grad, const DenseTensor& output,
const std::vector<int>& ksize, const std::vector<int>& strides, const DenseTensor& output_grad,
const std::vector<int>& paddings, const std::string data_format, const std::vector<int>& ksize,
framework::Tensor* input_grad) { const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NDHWC"); bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
@@ -1378,7 +1420,7 @@ class MaxPool3dGradFunctor<platform::CPUDeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
if (!channel_last) { if (!channel_last) {
const int input_stride = input_depth * input_height * input_width; const int input_stride = input_depth * input_height * input_width;
@@ -1475,29 +1517,17 @@ class MaxPool3dGradFunctor<platform::CPUDeviceContext, T> {
} }
} }
}; };
template class MaxPool3dGradFunctor<platform::CPUDeviceContext, float>; template class MaxPool3dGradFunctor<CPUContext, float>;
template class MaxPool3dGradFunctor<platform::CPUDeviceContext, double>; template class MaxPool3dGradFunctor<CPUContext, double>;
template class Pool3dFunctor<platform::CPUDeviceContext, template class Pool3dFunctor<CPUContext, MaxPool<float>, float>;
paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<CPUContext, AvgPool<float>, float>;
template class Pool3dFunctor<platform::CPUDeviceContext, template class Pool3dGradFunctor<CPUContext, MaxPoolGrad<float>, float>;
paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<CPUContext, AvgPoolGrad<float>, float>;
template class Pool3dGradFunctor<platform::CPUDeviceContext, template class Pool3dFunctor<CPUContext, MaxPool<double>, double>;
paddle::operators::math::MaxPoolGrad<float>, template class Pool3dFunctor<CPUContext, AvgPool<double>, double>;
float>; template class Pool3dGradFunctor<CPUContext, MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<platform::CPUDeviceContext, template class Pool3dGradFunctor<CPUContext, AvgPoolGrad<double>, double>;
paddle::operators::math::AvgPoolGrad<float>,
float>;
template class Pool3dFunctor<platform::CPUDeviceContext,
paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CPUDeviceContext,
paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CPUDeviceContext,
paddle::operators::math::MaxPoolGrad<double>,
double>;
template class Pool3dGradFunctor<platform::CPUDeviceContext,
paddle::operators::math::AvgPoolGrad<double>,
double>;
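For reference, the output extents these functors iterate over follow the usual floor-mode pooling relationship between input size, kernel size, stride, and padding; a standalone arithmetic sketch (ceil mode and adaptive pooling compute their windows differently and are not covered here):

#include <cstdio>

// out = (in + 2 * pad - ksize) / stride + 1  (floor division)
int PoolOutSize(int in, int ksize, int stride, int pad) {
  return (in + 2 * pad - ksize) / stride + 1;
}

int main() {
  // e.g. a 224-wide input with a 3-wide window, stride 2, padding 1.
  std::printf("%d\n", PoolOutSize(224, 3, 2, 1));  // prints 112
  return 0;
}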
/* /*
* All tensors are in NCHW format. * All tensors are in NCHW format.
@@ -1505,13 +1535,16 @@ template class Pool3dGradFunctor<platform::CPUDeviceContext,
* height and width, respectively. * height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CPUDeviceContext, T1, T2> { class MaxPool2dWithIndexFunctor<CPUContext, T1, T2> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* output, framework::Tensor* mask) { bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_height = input.dims()[2]; const int input_height = input.dims()[2];
const int input_width = input.dims()[3]; const int input_width = input.dims()[3];
@@ -1528,8 +1561,8 @@ class MaxPool2dWithIndexFunctor<platform::CPUDeviceContext, T1, T2> {
const int output_stride = output_height * output_width; const int output_stride = output_height * output_width;
const T1* input_data = input.data<T1>(); const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace()); T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); T2* mask_data = context.template Alloc<T2>(mask);
int hstart, hend; int hstart, hend;
int wstart, wend; int wstart, wend;
@@ -1583,14 +1616,16 @@ class MaxPool2dWithIndexFunctor<platform::CPUDeviceContext, T1, T2> {
* height and width, respectively. * height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CPUDeviceContext, T1, T2> { class MaxPool2dWithIndexGradFunctor<CPUContext, T1, T2> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize, const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* input_grad) { bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0]; const int batch_size = input_grad->dims()[0];
const int input_height = input_grad->dims()[2]; const int input_height = input_grad->dims()[2];
const int input_width = input_grad->dims()[3]; const int input_width = input_grad->dims()[3];
@@ -1602,7 +1637,7 @@ class MaxPool2dWithIndexGradFunctor<platform::CPUDeviceContext, T1, T2> {
const T2* mask_data = mask.data<T2>(); const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>(); const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); T1* input_grad_data = context.template Alloc<T1>(input_grad);
for (int n = 0; n < batch_size; ++n) { for (int n = 0; n < batch_size; ++n) {
for (int c = 0; c < output_channels; ++c) { for (int c = 0; c < output_channels; ++c) {
@@ -1622,14 +1657,10 @@ class MaxPool2dWithIndexGradFunctor<platform::CPUDeviceContext, T1, T2> {
} }
}; };
template class MaxPool2dWithIndexFunctor<platform::CPUDeviceContext, float, template class MaxPool2dWithIndexFunctor<CPUContext, float, int>;
int>; template class MaxPool2dWithIndexGradFunctor<CPUContext, float, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CPUDeviceContext, float, template class MaxPool2dWithIndexFunctor<CPUContext, double, int>;
int>; template class MaxPool2dWithIndexGradFunctor<CPUContext, double, int>;
template class MaxPool2dWithIndexFunctor<platform::CPUDeviceContext, double,
int>;
template class MaxPool2dWithIndexGradFunctor<platform::CPUDeviceContext, double,
int>;
/* /*
* All tensors are in NCDHW format. * All tensors are in NCDHW format.
@@ -1637,13 +1668,16 @@ template class MaxPool2dWithIndexGradFunctor<platform::CPUDeviceContext, double,
* depth, height and width, respectively. * depth, height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CPUDeviceContext, T1, T2> { class MaxPool3dWithIndexFunctor<CPUContext, T1, T2> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* output, framework::Tensor* mask) { bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2]; const int input_depth = input.dims()[2];
const int input_height = input.dims()[3]; const int input_height = input.dims()[3];
@@ -1665,8 +1699,8 @@ class MaxPool3dWithIndexFunctor<platform::CPUDeviceContext, T1, T2> {
const int output_stride = output_depth * output_height * output_width; const int output_stride = output_depth * output_height * output_width;
const T1* input_data = input.data<T1>(); const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace()); T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); T2* mask_data = context.template Alloc<T2>(mask);
int dstart, dend; int dstart, dend;
int hstart, hend; int hstart, hend;
@@ -1735,14 +1769,16 @@ class MaxPool3dWithIndexFunctor<platform::CPUDeviceContext, T1, T2> {
* depth, height and width, respectively. * depth, height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CPUDeviceContext, T1, T2> { class MaxPool3dWithIndexGradFunctor<CPUContext, T1, T2> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const CPUContext& context,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize, const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* input_grad) { bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0]; const int batch_size = input_grad->dims()[0];
const int input_depth = input_grad->dims()[2]; const int input_depth = input_grad->dims()[2];
const int input_height = input_grad->dims()[3]; const int input_height = input_grad->dims()[3];
@@ -1756,7 +1792,7 @@ class MaxPool3dWithIndexGradFunctor<platform::CPUDeviceContext, T1, T2> {
const T2* mask_data = mask.data<T2>(); const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>(); const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); T1* input_grad_data = context.template Alloc<T1>(input_grad);
for (int n = 0; n < batch_size; ++n) { for (int n = 0; n < batch_size; ++n) {
for (int c = 0; c < output_channels; ++c) { for (int c = 0; c < output_channels; ++c) {
@@ -1779,14 +1815,9 @@ class MaxPool3dWithIndexGradFunctor<platform::CPUDeviceContext, T1, T2> {
} }
}; };
template class MaxPool3dWithIndexFunctor<platform::CPUDeviceContext, float, template class MaxPool3dWithIndexFunctor<CPUContext, float, int>;
int>; template class MaxPool3dWithIndexGradFunctor<CPUContext, float, int>;
template class MaxPool3dWithIndexGradFunctor<platform::CPUDeviceContext, float, template class MaxPool3dWithIndexFunctor<CPUContext, double, int>;
int>; template class MaxPool3dWithIndexGradFunctor<CPUContext, double, int>;
template class MaxPool3dWithIndexFunctor<platform::CPUDeviceContext, double, } // namespace funcs
int>; } // namespace phi
template class MaxPool3dWithIndexGradFunctor<platform::CPUDeviceContext, double,
int>;
} // namespace math
} // namespace operators
} // namespace paddle
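The recurring mechanical change in this file is how output memory is obtained: the old tensor-side mutable_data<T>(place) call becomes an allocation through the device context. A minimal sketch of the new pattern, assuming a phi device context and DenseTensor as used throughout the hunks above:

#include "paddle/phi/core/dense_tensor.h"

// Sketch only. Old form (fluid):  T* p = tensor->mutable_data<T>(ctx.GetPlace());
// New form (phi), as used above:  T* p = ctx.template Alloc<T>(tensor);
template <typename T, typename Context>
T* AllocOutput(const Context& ctx, phi::DenseTensor* tensor) {
  return ctx.template Alloc<T>(tensor);
}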
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. /* Copyright (c) 2022 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -12,62 +12,71 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/phi/kernels/funcs/pooling.h"
#include <algorithm> #include <algorithm>
#include <vector> #include <vector>
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h" #include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
namespace paddle { namespace phi {
namespace operators { namespace funcs {
namespace math {
struct FastDivModForPooling { struct FastDivModForPooling {
public: public:
platform::FastDivMod channel; paddle::platform::FastDivMod channel;
platform::FastDivMod width; paddle::platform::FastDivMod width;
platform::FastDivMod height; paddle::platform::FastDivMod height;
explicit HOSTDEVICE FastDivModForPooling(const int channels, explicit HOSTDEVICE FastDivModForPooling(const int channels,
const int output_width, const int output_width,
const int output_height) { const int output_height) {
channel = platform::FastDivMod(channels); channel = paddle::platform::FastDivMod(channels);
width = platform::FastDivMod(output_width); width = paddle::platform::FastDivMod(output_width);
height = platform::FastDivMod(output_height); height = paddle::platform::FastDivMod(output_height);
} }
}; };
struct FastDivModForPoolingWithMoreStaff { struct FastDivModForPoolingWithMoreStaff {
public: public:
platform::FastDivMod channel; paddle::platform::FastDivMod channel;
platform::FastDivMod width; paddle::platform::FastDivMod width;
platform::FastDivMod height; paddle::platform::FastDivMod height;
platform::FastDivMod ksize_w; paddle::platform::FastDivMod ksize_w;
platform::FastDivMod ksize_h; paddle::platform::FastDivMod ksize_h;
platform::FastDivMod stride_w; paddle::platform::FastDivMod stride_w;
platform::FastDivMod stride_h; paddle::platform::FastDivMod stride_h;
explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff( explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
const int channels, const int input_width, const int input_height, const int channels,
const int ksize_width, const int ksize_height, const int stride_width, const int input_width,
const int input_height,
const int ksize_width,
const int ksize_height,
const int stride_width,
const int stride_height) { const int stride_height) {
channel = platform::FastDivMod(channels); channel = paddle::platform::FastDivMod(channels);
width = platform::FastDivMod(input_width); width = paddle::platform::FastDivMod(input_width);
height = platform::FastDivMod(input_height); height = paddle::platform::FastDivMod(input_height);
ksize_w = platform::FastDivMod(ksize_width); ksize_w = paddle::platform::FastDivMod(ksize_width);
ksize_h = platform::FastDivMod(ksize_height); ksize_h = paddle::platform::FastDivMod(ksize_height);
stride_w = platform::FastDivMod(stride_width); stride_w = paddle::platform::FastDivMod(stride_width);
stride_h = platform::FastDivMod(stride_height); stride_h = paddle::platform::FastDivMod(stride_height);
} }
}; };
template <typename FastDivModForPooling> template <typename FastDivModForPooling>
__device__ void OffsetPreparationFor4Dimension( __device__ void OffsetPreparationFor4Dimension(int index,
int index, bool channel_last, FastDivModForPooling divmods, bool channel_last,
const int pad_width, const int pad_height, const int aux_width, FastDivModForPooling divmods,
const int aux_height, int* w_offset, int* h_offset, int* c_offset, const int pad_width,
const int pad_height,
const int aux_width,
const int aux_height,
int* w_offset,
int* h_offset,
int* c_offset,
int* stride) { int* stride) {
if (!channel_last) { /* NCHW */ if (!channel_last) { /* NCHW */
auto input_width_divmod = divmods.width.Divmod(index); auto input_width_divmod = divmods.width.Divmod(index);
@@ -91,21 +100,40 @@ __device__ void OffsetPreparationFor4Dimension(
} }
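OffsetPreparationFor4Dimension performs this index decomposition with FastDivMod so the hot loop avoids hardware division; below is a plain div/mod sketch of what the NCHW branch works out, inferred from the call sites in this file (illustrative only, not code from the patch):

#include <cstdio>

// Decompose a flat NCHW element index into (w, h, c) plus the start offset of
// the matching (n, c) plane in a tensor whose plane size is aux_h x aux_w.
void OffsetsNCHW(int index, int C, int H, int W, int pad_h, int pad_w,
                 int aux_h, int aux_w,
                 int* w_offset, int* h_offset, int* c_offset, int* plane_base) {
  int w = index % W;
  int h = (index / W) % H;
  int c = (index / (W * H)) % C;
  int n = index / (W * H * C);
  *w_offset = w + pad_w;
  *h_offset = h + pad_h;
  *c_offset = c;
  *plane_base = (n * C + c) * aux_h * aux_w;
}

int main() {
  int w, h, c, base;
  OffsetsNCHW(1234, /*C=*/3, /*H=*/16, /*W=*/16, 0, 0, 32, 32, &w, &h, &c, &base);
  std::printf("w=%d h=%d c=%d plane_base=%d\n", w, h, c, base);
  return 0;
}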
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
__global__ void KernelPool2D( __global__ void KernelPool2D(const int nthreads,
const int nthreads, const T* input_data, const int channels, const T* input_data,
const int input_height, const int input_width, const int output_height, const int channels,
const int output_width, const int ksize_height, const int ksize_width, const int input_height,
const int stride_height, const int stride_width, const int padding_height, const int input_width,
const int padding_width, FastDivModForPooling divmods, const int output_height,
PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
FastDivModForPooling divmods,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* output_data,
bool channel_last = false) { bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend; int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset; int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>( OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
index, channel_last, divmods, 0, 0, input_width, input_height, channel_last,
&w_offset, &h_offset, &c_offset, &input_offset); divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset; input_data += input_offset;
if (adaptive) { if (adaptive) {
@@ -139,25 +167,43 @@ __global__ void KernelPool2D(
} }
template <typename T, typename PoolProcess> template <typename T, typename PoolProcess>
__global__ void KernelPool2DGrad( __global__ void KernelPool2DGrad(const int nthreads,
const int nthreads, const T* __restrict__ input_data, const T* __restrict__ input_data,
const T* __restrict__ output_data, const const T* __restrict__ output_grad, const T* __restrict__ output_data,
const int output_width, const int output_height, const int input_width, const const T* __restrict__ output_grad,
const int input_height, const int ksize_width, const int ksize_height, const int output_width,
const int stride_width, const int stride_height, const int padding_width, const int output_height,
const int padding_height, FastDivModForPoolingWithMoreStaff divmods, const int input_width,
PoolProcess pool_process, bool exclusive, bool adaptive, const int input_height,
T* __restrict__ input_grad, bool channel_last = false) { const int ksize_width,
const int ksize_height,
const int stride_width,
const int stride_height,
const int padding_width,
const int padding_height,
FastDivModForPoolingWithMoreStaff divmods,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* __restrict__ input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
T input = static_cast<T>(0); T input = static_cast<T>(0);
T input_grad_data = static_cast<T>(0); T input_grad_data = static_cast<T>(0);
int phstart, phend, pwstart, pwend; int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset; int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<>(index, channel_last, divmods, OffsetPreparationFor4Dimension<>(index,
padding_width, padding_height, channel_last,
output_width, output_height, &w_offset, divmods,
&h_offset, &c_offset, &output_offset); padding_width,
padding_height,
output_width,
output_height,
&w_offset,
&h_offset,
&c_offset,
&output_offset);
if (pool_process.use_x) { if (pool_process.use_x) {
input = input_data[index]; input = input_data[index];
output_data += output_offset; output_data += output_offset;
@@ -188,7 +234,9 @@ __global__ void KernelPool2DGrad(
: tmp_idx; : tmp_idx;
T ouput_value = pool_process.use_x ? output_data[output_sub_idx] T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0); : static_cast<T>(0);
pool_process.compute(input, ouput_value, output_grad[output_sub_idx], pool_process.compute(input,
ouput_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), static_cast<T>(1.0 / pool_size),
&input_grad_data); &input_grad_data);
} }
@@ -217,9 +265,11 @@ __global__ void KernelPool2DGrad(
: tmp_idx; : tmp_idx;
T ouput_value = pool_process.use_x ? output_data[output_sub_idx] T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0); : static_cast<T>(0);
pool_process.compute( pool_process.compute(input,
input, ouput_value, output_grad[output_sub_idx], ouput_value,
static_cast<T>(1.0 / pool_size), &input_grad_data); output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
} }
} }
} else { } else {
@@ -232,9 +282,11 @@ __global__ void KernelPool2DGrad(
: tmp_idx; : tmp_idx;
T ouput_value = pool_process.use_x ? output_data[output_sub_idx] T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0); : static_cast<T>(0);
pool_process.compute( pool_process.compute(input,
input, ouput_value, output_grad[output_sub_idx], ouput_value,
static_cast<T>(1.0 / pool_size), &input_grad_data); output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
} }
} }
} }
@@ -244,19 +296,38 @@ __global__ void KernelPool2DGrad(
} }
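The static_cast<T>(1.0 / pool_size) factor above is what spreads each output gradient evenly across its window in the average-pool path (the max-pool path instead routes the whole gradient to the argmax position, as KernelMaxPool2DGrad below does with an atomic add). A small worked example of that scaling:

#include <cstdio>

int main() {
  const float output_grad = 0.8f;
  const int pool_size = 2 * 2;  // a 2x2 window
  const float per_input = output_grad * (1.0f / pool_size);
  std::printf("%f\n", per_input);  // each of the 4 inputs receives 0.2
  return 0;
}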
template <typename T> template <typename T>
__global__ void KernelMaxPool2DGrad( __global__ void KernelMaxPool2DGrad(const int nthreads,
const int nthreads, const T* input_data, const T* output_data, const T* input_data,
const T* output_grad, const int channels, const int input_height, const T* output_data,
const int input_width, const int output_height, const int output_width, const T* output_grad,
const int ksize_height, const int ksize_width, const int stride_height, const int channels,
const int stride_width, const int padding_height, const int padding_width, const int input_height,
T* input_grad, FastDivModForPooling divmods, bool channel_last = false) { const int input_width,
const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
T* input_grad,
FastDivModForPooling divmods,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int w_offset, h_offset, c_offset, input_offset; int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>( OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
index, channel_last, divmods, 0, 0, input_width, input_height, channel_last,
&w_offset, &h_offset, &c_offset, &input_offset); divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset; input_data += input_offset;
input_grad += input_offset; input_grad += input_offset;
@@ -285,17 +356,24 @@ __global__ void KernelMaxPool2DGrad(
if (maxIndex != -1) { if (maxIndex != -1) {
// atomic add // atomic add
platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); paddle::platform::CudaAtomicAdd(input_grad + maxIndex,
output_grad[index]);
} }
} }
} }
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape, const T* input,
const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& input_shape,
const std::vector<int>& strides, const std::vector<int>& paddings, const std::vector<int>& output_shape,
bool exclusive, bool adaptive, T* output, gpuStream_t stream, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute) { PoolProcess pool_compute) {
const int batch_size = input_shape[0]; const int batch_size = input_shape[0];
const int input_channels = input_shape[1]; const int input_channels = input_shape[1];
@@ -314,7 +392,7 @@ void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
int nthreads = batch_size * output_channels * output_height * output_width; int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
// platform::ChangeThreadNum(context, &thread_num); // paddle::platform::ChangeThreadNum(context, &thread_num);
thread_num = 512; thread_num = 512;
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
@@ -323,11 +401,24 @@ void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
auto pool_divmods = auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height); FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>( KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>(nthreads,
nthreads, input, input_channels, input_height, input_width, output_height, input,
output_width, ksize_height, ksize_width, stride_height, stride_width, input_channels,
padding_height, padding_width, pool_divmods, pool_compute, exclusive, input_height,
adaptive, output); input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_compute,
exclusive,
adaptive,
output);
} }
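The launch sizing in Pool2dDirectCUDAFunctor is a plain ceiling division of the element count by the per-block thread count (the WITH_NV_JETSON branch capping thread_num at 512 is a separate tuning detail). A standalone sketch of that arithmetic:

#include <cstdio>

int main() {
  // One thread per output element: nthreads = batch * channels * out_h * out_w.
  const int nthreads = 1 * 64 * 56 * 56;  // illustrative sizes
  const int thread_num = 1024;
  const int blocks = (nthreads + thread_num - 1) / thread_num;  // ceil division
  std::printf("grid=%d, block=%d\n", blocks, thread_num);
  return 0;
}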
/* /*
@@ -338,13 +429,16 @@ void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
* height_down, width_left and width_right, respectively. * height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { class Pool2dFunctor<phi::GPUContext, PoolProcess, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* output, bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) { PoolProcess pool_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
@@ -361,12 +455,12 @@ class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const int padding_width = paddings[1]; const int padding_width = paddings[1];
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_height * output_width; int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num); paddle::platform::ChangeThreadNum(context, &thread_num);
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1); dim3 threads(thread_num, 1);
@@ -375,17 +469,35 @@ class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
auto pool_divmods = auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height); FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width, nthreads,
output_height, output_width, ksize_height, ksize_width, stride_height, input_data,
stride_width, padding_height, padding_width, pool_divmods, pool_process, input_channels,
exclusive, adaptive, output_data); input_height,
} input_width,
void operator()(const platform::CUDADeviceContext& context, output_height,
const framework::Tensor& input, const std::vector<int>& ksize, output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_process,
exclusive,
adaptive,
output_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* output, PoolProcess pool_process) { bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC"); bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
@@ -410,12 +522,12 @@ class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const int padding_width = paddings[1]; const int padding_width = paddings[1];
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_height * output_width; int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num); paddle::platform::ChangeThreadNum(context, &thread_num);
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1); dim3 threads(thread_num, 1);
@@ -424,10 +536,25 @@ class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
auto pool_divmods = auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height); FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width, nthreads,
output_height, output_width, ksize_height, ksize_width, stride_height, input_data,
stride_width, padding_height, padding_width, pool_divmods, pool_process, input_channels,
exclusive, adaptive, output_data, channel_last); input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
pool_divmods,
pool_process,
exclusive,
adaptive,
output_data,
channel_last);
} }
}; };
/* /*
@@ -438,16 +565,18 @@ class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
* height_down, width_left and width_right, respectively. * height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { class Pool2dGradFunctor<phi::GPUContext, PoolProcess, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const DenseTensor& output,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* input_grad, bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) { PoolProcess pool_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
@@ -465,30 +594,53 @@ class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width; int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff( auto pool_divmods = FastDivModForPoolingWithMoreStaff(input_channels,
input_channels, input_width, input_height, ksize_width, ksize_height, input_width,
stride_width, stride_height); input_height,
ksize_width,
auto config = GetGpuLaunchConfig1D(context, nthreads); ksize_height,
KernelPool2DGrad<T, PoolProcess><<< stride_width,
config.block_per_grid, config.thread_per_block, 0, context.stream()>>>( stride_height);
nthreads, input_data, output_data, output_grad_data, output_width,
output_height, input_width, input_height, ksize_width, ksize_height, auto config = phi::backends::gpu::GetGpuLaunchConfig1D(context, nthreads);
stride_width, stride_height, padding_width, padding_height, KernelPool2DGrad<T, PoolProcess><<<config.block_per_grid,
pool_divmods, pool_process, exclusive, adaptive, input_grad_data); config.thread_per_block,
} 0,
void operator()(const platform::CUDADeviceContext& context, context.stream()>>>(nthreads,
const framework::Tensor& input, input_data,
const framework::Tensor& output, output_data,
const framework::Tensor& output_grad, output_grad_data,
output_width,
output_height,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height,
padding_width,
padding_height,
pool_divmods,
pool_process,
exclusive,
adaptive,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* input_grad, PoolProcess pool_process) { bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC"); bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
@@ -514,20 +666,40 @@ class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width; int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff( auto pool_divmods = FastDivModForPoolingWithMoreStaff(input_channels,
input_channels, input_width, input_height, ksize_width, ksize_height, input_width,
stride_width, stride_height); input_height,
ksize_width,
auto config = GetGpuLaunchConfig1D(context, nthreads); ksize_height,
KernelPool2DGrad<T, PoolProcess><<< stride_width,
config.block_per_grid, config.thread_per_block, 0, context.stream()>>>( stride_height);
nthreads, input_data, output_data, output_grad_data, output_width,
output_height, input_width, input_height, ksize_width, ksize_height, auto config = phi::backends::gpu::GetGpuLaunchConfig1D(context, nthreads);
stride_width, stride_height, padding_width, padding_height, KernelPool2DGrad<T, PoolProcess><<<config.block_per_grid,
pool_divmods, pool_process, exclusive, adaptive, input_grad_data, config.thread_per_block,
0,
context.stream()>>>(nthreads,
input_data,
output_data,
output_grad_data,
output_width,
output_height,
input_width,
input_height,
ksize_width,
ksize_height,
stride_width,
stride_height,
padding_width,
padding_height,
pool_divmods,
pool_process,
exclusive,
adaptive,
input_grad_data,
channel_last); channel_last);
} }
}; };
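Two small pieces of integer arithmetic recur throughout these functors: the pooled output extent and the divisor that average pooling uses in exclusive versus inclusive mode. A standalone host-side sketch of both, with illustrative names only (this is not Paddle's API):

#include <cstdio>

// Pooled extent of one dimension with the floor convention used by
// non-adaptive pooling (a ceil mode would typically add stride - 1 before dividing).
int PooledSize(int input, int ksize, int pad, int stride) {
  return (input - ksize + 2 * pad) / stride + 1;
}

// Divisor for average pooling: exclusive mode counts only the in-bounds
// elements of the (possibly clipped) window, inclusive mode always uses
// the full kernel area.
int AvgPoolDivisor(int hstart, int hend, int wstart, int wend,
                   int ksize_h, int ksize_w, bool exclusive) {
  return exclusive ? (hend - hstart) * (wend - wstart) : ksize_h * ksize_w;
}

int main() {
  printf("output width = %d\n", PooledSize(32, 3, 1, 2));
  printf("exclusive divisor of a clipped 2x3 window = %d\n",
         AvgPoolDivisor(0, 2, 0, 3, 3, 3, true));
  return 0;
}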
...@@ -540,16 +712,16 @@ class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -540,16 +712,16 @@ class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
* height_down, width_left and width_right, respectively. * height_down, width_left and width_right, respectively.
*/ */
template <typename T> template <typename T>
class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { class MaxPool2dGradFunctor<phi::GPUContext, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const DenseTensor& output,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
framework::Tensor* input_grad) { DenseTensor* input_grad) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
const int input_height = input.dims()[2]; const int input_height = input.dims()[2];
...@@ -567,7 +739,7 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -567,7 +739,7 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_height * output_width; int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024; int blocks = (nthreads + 1024 - 1) / 1024;
...@@ -577,17 +749,33 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -577,17 +749,33 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
auto pool_divmods = auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height); FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels, nthreads,
input_height, input_width, output_height, output_width, ksize_height, input_data,
ksize_width, stride_height, stride_width, padding_height, padding_width, output_data,
input_grad_data, pool_divmods); output_grad_data,
} input_channels,
void operator()( input_height,
const platform::CUDADeviceContext& context, input_width,
const framework::Tensor& input, const framework::Tensor& output, output_height,
const framework::Tensor& output_grad, const std::vector<int>& ksize, output_width,
const std::vector<int>& strides, const std::vector<int>& paddings, ksize_height,
const std::string data_format, framework::Tensor* input_grad) { ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
input_grad_data,
pool_divmods);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NHWC"); bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
...@@ -614,7 +802,7 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -614,7 +802,7 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_height * output_width; int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024; int blocks = (nthreads + 1024 - 1) / 1024;
...@@ -625,70 +813,79 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -625,70 +813,79 @@ class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
FastDivModForPooling(input_channels, output_width, output_height); FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels, nthreads,
input_height, input_width, output_height, output_width, ksize_height, input_data,
ksize_width, stride_height, stride_width, padding_height, padding_width, output_data,
input_grad_data, pool_divmods, channel_last); output_grad_data,
input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
input_grad_data,
pool_divmods,
channel_last);
} }
}; };
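For orientation, the computation that the max-pool backward kernel performs per window, written as a plain CPU reference rather than the CUDA kernel above: single channel, no padding, valid windows only; names are illustrative.

#include <cstdio>
#include <vector>

// Route each output gradient to the input position that produced the maximum
// in the forward pass, accumulating when windows overlap.
void MaxPool2dGradRef(const std::vector<float>& x, const std::vector<float>& dy,
                      int H, int W, int k, int stride, std::vector<float>* dx) {
  int OH = (H - k) / stride + 1;
  int OW = (W - k) / stride + 1;
  dx->assign(H * W, 0.0f);
  for (int oh = 0; oh < OH; ++oh) {
    for (int ow = 0; ow < OW; ++ow) {
      int best = (oh * stride) * W + (ow * stride);  // running argmax
      for (int i = 0; i < k; ++i) {
        for (int j = 0; j < k; ++j) {
          int idx = (oh * stride + i) * W + (ow * stride + j);
          if (x[idx] > x[best]) best = idx;
        }
      }
      (*dx)[best] += dy[oh * OW + ow];  // accumulate at the argmax
    }
  }
}

int main() {
  std::vector<float> x = {1, 2, 3, 4, 5, 6, 7, 8, 9};  // 3x3 input
  std::vector<float> dy = {1, 1, 1, 1};                // 2x2 output grad, k = 2, stride = 1
  std::vector<float> dx;
  MaxPool2dGradRef(x, dy, 3, 3, 2, 1, &dx);
  for (float v : dx) printf("%.1f ", v);
  printf("\n");
  return 0;
}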
// Removed (fluid version):
template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>;
template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, paddle::platform::float16>;
template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<paddle::platform::float16>, paddle::platform::float16>;
template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<paddle::platform::float16>, paddle::platform::float16>;
template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<paddle::platform::float16>, paddle::platform::float16>;
template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<paddle::platform::float16>, paddle::platform::float16>;

// Added (phi version):
template class Pool2dDirectCUDAFunctor<MaxPool<float>, float>;
template class Pool2dDirectCUDAFunctor<AvgPool<float>, float>;
template class MaxPool2dGradFunctor<phi::GPUContext, float>;
template class MaxPool2dGradFunctor<phi::GPUContext, double>;
template class MaxPool2dGradFunctor<phi::GPUContext, dtype::float16>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<float>, float>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<float>, float>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<float>, float>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<float>, float>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<double>, double>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<double>, double>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<double>, double>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<double>, double>;
template class Pool2dFunctor<phi::GPUContext, MaxPool<dtype::float16>, dtype::float16>;
template class Pool2dFunctor<phi::GPUContext, AvgPool<dtype::float16>, dtype::float16>;
template class Pool2dGradFunctor<phi::GPUContext, MaxPoolGrad<dtype::float16>, dtype::float16>;
template class Pool2dGradFunctor<phi::GPUContext, AvgPoolGrad<dtype::float16>, dtype::float16>;
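The instantiation block above is the usual explicit-instantiation idiom: the templates are defined in this .cu file and stamped out once per (device context, pooling operation, element type) combination so that other translation units can link against them. A toy standalone illustration (the types here are invented for the sketch, not Paddle's):

#include <cstdio>

// Toy stand-ins; in the real file these are phi::GPUContext, MaxPool<T>, etc.
struct ToyContext {};

template <typename Context, typename T>
class ToyPoolFunctor {
 public:
  T Identity(const Context&, T v) const { return v; }
};

// Explicit instantiations: the compiler emits the full class for these type
// combinations into this object file, so callers elsewhere only need the
// declarations.
template class ToyPoolFunctor<ToyContext, float>;
template class ToyPoolFunctor<ToyContext, double>;

int main() {
  ToyContext ctx;
  ToyPoolFunctor<ToyContext, float> f;
  printf("%f\n", f.Identity(ctx, 3.5f));
  return 0;
}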
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
__global__ void KernelPool3D( __global__ void KernelPool3D(const int nthreads,
const int nthreads, const T* input_data, const int channels, const T* input_data,
const int input_depth, const int input_height, const int input_width, const int channels,
const int output_depth, const int output_height, const int output_width, const int input_depth,
const int ksize_depth, const int ksize_height, const int ksize_width, const int input_height,
const int stride_depth, const int stride_height, const int stride_width, const int input_width,
const int padding_depth, const int padding_height, const int padding_width, const int output_depth,
PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* output_data,
bool channel_last = false) { bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
...@@ -764,16 +961,31 @@ __global__ void KernelPool3D( ...@@ -764,16 +961,31 @@ __global__ void KernelPool3D(
} }
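Each thread of the 3-D kernels first unpacks its flat output index into (batch, channel, depth, height, width) with a chain of divisions and modulos, as in the index % output_width lines above. The same arithmetic on the host, purely for illustration:

#include <cstdio>

struct Coord5 { int n, c, d, h, w; };

// Unpack a flat index for an NCDHW-laid-out tensor.
Coord5 UnpackNCDHW(int index, int C, int D, int H, int W) {
  Coord5 p;
  p.w = index % W;
  p.h = (index / W) % H;
  p.d = (index / (W * H)) % D;
  p.c = (index / (W * H * D)) % C;
  p.n = index / (W * H * D * C);
  return p;
}

int main() {
  Coord5 p = UnpackNCDHW(/*index=*/12345, /*C=*/8, /*D=*/4, /*H=*/16, /*W=*/16);
  printf("n=%d c=%d d=%d h=%d w=%d\n", p.n, p.c, p.d, p.h, p.w);
  return 0;
}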
template <typename T, typename PoolProcess> template <typename T, typename PoolProcess>
__global__ void KernelPool3DGrad( __global__ void KernelPool3DGrad(const int nthreads,
const int nthreads, const T* __restrict__ input_data, const T* __restrict__ input_data,
const T* __restrict__ output_data, const T* __restrict__ output_grad, const T* __restrict__ output_data,
const int channels, const int input_depth, const int input_height, const T* __restrict__ output_grad,
const int input_width, const int output_depth, const int output_height, const int channels,
const int output_width, const int ksize_depth, const int ksize_height, const int input_depth,
const int ksize_width, const int stride_depth, const int stride_height, const int input_height,
const int stride_width, const int padding_depth, const int padding_height, const int input_width,
const int padding_width, PoolProcess pool_process, bool exclusive, const int output_depth,
bool adaptive, T* input_grad, bool channel_last = false) { const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
PoolProcess pool_process,
bool exclusive,
bool adaptive,
T* input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int w_offset, h_offset, d_offset, c_offset, batch_idx, output_stride; int w_offset, h_offset, d_offset, c_offset, batch_idx, output_stride;
...@@ -867,7 +1079,9 @@ __global__ void KernelPool3DGrad( ...@@ -867,7 +1079,9 @@ __global__ void KernelPool3DGrad(
: (pd * output_height + ph) * output_width + pw; : (pd * output_height + ph) * output_width + pw;
T ouput_value = pool_process.use_x ? output_data[output_sub_idx] T ouput_value = pool_process.use_x ? output_data[output_sub_idx]
: static_cast<T>(0); : static_cast<T>(0);
pool_process.compute(input, ouput_value, output_grad[output_sub_idx], pool_process.compute(input,
ouput_value,
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), static_cast<T>(1.0 / pool_size),
&input_grad_data); &input_grad_data);
} }
...@@ -878,14 +1092,27 @@ __global__ void KernelPool3DGrad( ...@@ -878,14 +1092,27 @@ __global__ void KernelPool3DGrad(
} }
template <typename T> template <typename T>
__global__ void KernelMaxPool3DGrad( __global__ void KernelMaxPool3DGrad(const int nthreads,
const int nthreads, const T* input_data, const T* output_data, const T* input_data,
const T* output_grad, const int channels, const int input_depth, const T* output_data,
const int input_height, const int input_width, const int output_depth, const T* output_grad,
const int output_height, const int output_width, const int ksize_depth, const int channels,
const int ksize_height, const int ksize_width, const int stride_depth, const int input_depth,
const int stride_height, const int stride_width, const int padding_depth, const int input_height,
const int padding_height, const int padding_width, T* input_grad, const int input_width,
const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
T* input_grad,
bool channel_last = false) { bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
...@@ -949,17 +1176,23 @@ __global__ void KernelMaxPool3DGrad( ...@@ -949,17 +1176,23 @@ __global__ void KernelMaxPool3DGrad(
} }
if (maxIdx != -1) { if (maxIdx != -1) {
// atomic add // atomic add
platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); paddle::platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
} }
} }
} }
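The paddle::platform::CudaAtomicAdd above is needed because this backward kernel runs one thread per output element, and overlapping windows (stride smaller than the kernel size) let several outputs pick the same input element as their maximum. A small host-side count that makes the overlap visible; illustrative only:

#include <cstdio>
#include <vector>

int main() {
  const int input_len = 10, k = 3, stride = 2;
  std::vector<int> covered(input_len, 0);
  for (int start = 0; start + k <= input_len; start += stride) {
    for (int i = start; i < start + k; ++i) ++covered[i];  // windows touching i
  }
  for (int i = 0; i < input_len; ++i) {
    printf("input %d is inside %d window(s)\n", i, covered[i]);
  }
  return 0;
}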
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()( void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape, const T* input,
const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& input_shape,
const std::vector<int>& strides, const std::vector<int>& paddings, const std::vector<int>& output_shape,
bool exclusive, bool adaptive, T* output, gpuStream_t stream, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute) { PoolProcess pool_compute) {
const int batch_size = input_shape[0]; const int batch_size = input_shape[0];
const int input_channels = input_shape[1]; const int input_channels = input_shape[1];
...@@ -990,11 +1223,28 @@ void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()( ...@@ -990,11 +1223,28 @@ void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
dim3 threads(thread_num, 1); dim3 threads(thread_num, 1);
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, stream>>>( KernelPool3D<PoolProcess, T><<<grid, threads, 0, stream>>>(nthreads,
nthreads, input, input_channels, input_depth, input_height, input_width, input,
output_depth, output_height, output_width, ksize_depth, ksize_height, input_channels,
ksize_width, stride_depth, stride_height, stride_width, padding_depth, input_depth,
padding_height, padding_width, pool_compute, exclusive, adaptive, output); input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_compute,
exclusive,
adaptive,
output);
} }
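The launch shape computed above is plain ceiling division: one thread per output element, grouped into fixed-size thread blocks (1024 here, with Jetson builds able to lower it through ChangeThreadNum). The same arithmetic as a host-side sketch:

#include <cstdio>

int CeilDiv(int a, int b) { return (a + b - 1) / b; }

int main() {
  int nthreads = 2 * 16 * 8 * 32 * 32;  // N * C * D * H * W of the output
  int thread_num = 1024;                // threads per block
  int blocks = CeilDiv(nthreads, thread_num);
  printf("launch %d blocks of %d threads for %d elements\n",
         blocks, thread_num, nthreads);
  return 0;
}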
/* /*
...@@ -1006,13 +1256,16 @@ void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()( ...@@ -1006,13 +1256,16 @@ void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
* height_up, height_down, width_left and width_right, respectively. * height_up, height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, class T> template <typename PoolProcess, class T>
class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { class Pool3dFunctor<phi::GPUContext, PoolProcess, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* output, bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) { PoolProcess pool_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
...@@ -1034,31 +1287,52 @@ class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1034,31 +1287,52 @@ class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const int padding_width = paddings[2]; const int padding_width = paddings[2];
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_depth * output_height * int nthreads = batch_size * output_channels * output_depth * output_height *
output_width; output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num); paddle::platform::ChangeThreadNum(context, &thread_num);
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1); dim3 threads(thread_num, 1);
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height, nthreads,
input_width, output_depth, output_height, output_width, ksize_depth, input_data,
ksize_height, ksize_width, stride_depth, stride_height, stride_width, input_channels,
padding_depth, padding_height, padding_width, pool_process, exclusive, input_depth,
adaptive, output_data); input_height,
} input_width,
void operator()(const platform::CUDADeviceContext& context, output_depth,
const framework::Tensor& input, const std::vector<int>& ksize, output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
output_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* output, PoolProcess pool_process) { bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC"); bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
...@@ -1089,24 +1363,42 @@ class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1089,24 +1363,42 @@ class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const int padding_width = paddings[2]; const int padding_width = paddings[2];
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace()); T* output_data = context.template Alloc<T>(output);
int nthreads = batch_size * output_channels * output_depth * output_height * int nthreads = batch_size * output_channels * output_depth * output_height *
output_width; output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num); paddle::platform::ChangeThreadNum(context, &thread_num);
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1); dim3 threads(thread_num, 1);
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height, nthreads,
input_width, output_depth, output_height, output_width, ksize_depth, input_data,
ksize_height, ksize_width, stride_depth, stride_height, stride_width, input_channels,
padding_depth, padding_height, padding_width, pool_process, exclusive, input_depth,
adaptive, output_data, channel_last); input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
output_data,
channel_last);
} }
}; };
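The channel_last overloads differ from the NCDHW ones mainly in how a coordinate is flattened into a memory offset. A host-side comparison of the two layouts, with illustrative names:

#include <cstdio>

int OffsetNCDHW(int n, int c, int d, int h, int w, int C, int D, int H, int W) {
  return (((n * C + c) * D + d) * H + h) * W + w;  // channel-first layout
}

int OffsetNDHWC(int n, int c, int d, int h, int w, int C, int D, int H, int W) {
  return (((n * D + d) * H + h) * W + w) * C + c;  // channel-last layout
}

int main() {
  printf("NCDHW offset: %d\n", OffsetNCDHW(1, 2, 3, 4, 5, 8, 8, 16, 16));
  printf("NDHWC offset: %d\n", OffsetNDHWC(1, 2, 3, 4, 5, 8, 8, 16, 16));
  return 0;
}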
...@@ -1119,16 +1411,18 @@ class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1119,16 +1411,18 @@ class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
* height_up, height_down, width_left and width_right, respectively. * height_up, height_down, width_left and width_right, respectively.
*/ */
template <typename PoolProcess, class T> template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { class Pool3dGradFunctor<phi::GPUContext, PoolProcess, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const DenseTensor& output,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* input_grad, bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) { PoolProcess pool_process) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
...@@ -1152,7 +1446,7 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1152,7 +1446,7 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = int nthreads =
batch_size * input_channels * input_depth * input_height * input_width; batch_size * input_channels * input_depth * input_height * input_width;
...@@ -1161,21 +1455,43 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1161,21 +1455,43 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>( KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels, nthreads,
input_depth, input_height, input_width, output_depth, output_height, input_data,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth, output_data,
stride_height, stride_width, padding_depth, padding_height, output_grad_data,
padding_width, pool_process, exclusive, adaptive, input_grad_data); input_channels,
} input_depth,
void operator()(const platform::CUDADeviceContext& context, input_height,
const framework::Tensor& input, input_width,
const framework::Tensor& output, output_depth,
const framework::Tensor& output_grad, output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* input_grad, PoolProcess pool_process) { bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC"); bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
...@@ -1206,7 +1522,7 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1206,7 +1522,7 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = int nthreads =
batch_size * input_channels * input_depth * input_height * input_width; batch_size * input_channels * input_depth * input_height * input_width;
...@@ -1215,11 +1531,30 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1215,11 +1531,30 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>( KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels, nthreads,
input_depth, input_height, input_width, output_depth, output_height, input_data,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth, output_data,
stride_height, stride_width, padding_depth, padding_height, output_grad_data,
padding_width, pool_process, exclusive, adaptive, input_grad_data, input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
pool_process,
exclusive,
adaptive,
input_grad_data,
channel_last); // add channel_last channel_last); // add channel_last
} }
}; };
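What the AvgPoolGrad path accumulates, reduced to one dimension and plain C++: every input element inside a window receives output_grad scaled by 1 / pool_size, summed over all windows that cover it. An inclusive divisor and no padding are assumed in this sketch:

#include <cstdio>
#include <vector>

std::vector<float> AvgPool1dGrad(const std::vector<float>& dy,
                                 int input_len, int k, int stride) {
  std::vector<float> dx(input_len, 0.0f);
  for (int o = 0; o < static_cast<int>(dy.size()); ++o) {
    int start = o * stride;
    int end = start + k;  // no padding in this sketch
    for (int i = start; i < end && i < input_len; ++i) {
      dx[i] += dy[o] / static_cast<float>(k);  // inclusive divisor
    }
  }
  return dx;
}

int main() {
  std::vector<float> dy = {1.0f, 2.0f, 3.0f};
  std::vector<float> dx = AvgPool1dGrad(dy, /*input_len=*/8, /*k=*/2, /*stride=*/2);
  for (float v : dx) printf("%.2f ", v);
  printf("\n");
  return 0;
}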
...@@ -1233,16 +1568,16 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { ...@@ -1233,16 +1568,16 @@ class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
* height_up, height_down, width_left and width_right, respectively. * height_up, height_down, width_left and width_right, respectively.
*/ */
template <class T> template <class T>
class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { class MaxPool3dGradFunctor<phi::GPUContext, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const DenseTensor& input,
const framework::Tensor& output, const DenseTensor& output,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
framework::Tensor* input_grad) { DenseTensor* input_grad) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2]; const int input_depth = input.dims()[2];
...@@ -1265,7 +1600,7 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -1265,7 +1600,7 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_depth * output_height * int nthreads = batch_size * output_channels * output_depth * output_height *
output_width; output_width;
...@@ -1274,18 +1609,37 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -1274,18 +1609,37 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels, nthreads,
input_depth, input_height, input_width, output_depth, output_height, input_data,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth, output_data,
stride_height, stride_width, padding_depth, padding_height, output_grad_data,
padding_width, input_grad_data); input_channels,
} input_depth,
void operator()( input_height,
const platform::CUDADeviceContext& context, input_width,
const framework::Tensor& input, const framework::Tensor& output, output_depth,
const framework::Tensor& output_grad, const std::vector<int>& ksize, output_height,
const std::vector<int>& strides, const std::vector<int>& paddings, output_width,
const std::string data_format, framework::Tensor* input_grad) { ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
input_grad_data);
}
void operator()(const phi::GPUContext& context,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format,
DenseTensor* input_grad) {
bool channel_last = (data_format == "NDHWC"); bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
...@@ -1316,7 +1670,7 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -1316,7 +1670,7 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
const T* input_data = input.data<T>(); const T* input_data = input.data<T>();
const T* output_data = output.data<T>(); const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>(); const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); T* input_grad_data = context.template Alloc<T>(input_grad);
int nthreads = batch_size * output_channels * output_depth * output_height * int nthreads = batch_size * output_channels * output_depth * output_height *
output_width; output_width;
...@@ -1325,77 +1679,93 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { ...@@ -1325,77 +1679,93 @@ class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels, nthreads,
input_depth, input_height, input_width, output_depth, output_height, input_data,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth, output_data,
stride_height, stride_width, padding_depth, padding_height, output_grad_data,
padding_width, input_grad_data, channel_last); // add channel_last input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
input_grad_data,
channel_last); // add channel_last
} }
}; };
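A mechanical change that repeats throughout this diff is the move from input_grad->mutable_data<T>(context.GetPlace()) to context.template Alloc<T>(input_grad). The sketch below mimics only the shape of the two call styles with toy types; it is not the fluid or phi API.

#include <cstdio>
#include <vector>

// Toy stand-ins only.
struct ToyTensor {
  std::vector<char> buffer;
};
struct ToyPlace {};

// Old style: the tensor hands out memory when asked for a typed pointer.
template <typename T>
T* MutableData(ToyTensor* t, ToyPlace /*place*/, size_t n) {
  t->buffer.resize(n * sizeof(T));
  return reinterpret_cast<T*>(t->buffer.data());
}

// New style: the device context is responsible for typed allocation.
struct ToyContext {
  template <typename T>
  T* Alloc(ToyTensor* t, size_t n) const {
    t->buffer.resize(n * sizeof(T));
    return reinterpret_cast<T*>(t->buffer.data());
  }
};

int main() {
  ToyTensor t;
  ToyContext ctx;
  float* a = MutableData<float>(&t, ToyPlace{}, 16);  // fluid-style call shape
  float* b = ctx.Alloc<float>(&t, 16);                // phi-style call shape
  printf("%p %p\n", static_cast<void*>(a), static_cast<void*>(b));
  return 0;
}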
// Removed (fluid version):
template class Pool3dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>;
template class Pool3dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, paddle::platform::float16>;
template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<paddle::platform::float16>, paddle::platform::float16>;
template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<paddle::platform::float16>, paddle::platform::float16>;
template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<paddle::platform::float16>, paddle::platform::float16>;
template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<paddle::platform::float16>, paddle::platform::float16>;

// Added (phi version):
template class Pool3dDirectCUDAFunctor<MaxPool<float>, float>;
template class Pool3dDirectCUDAFunctor<AvgPool<float>, float>;
template class MaxPool3dGradFunctor<phi::GPUContext, float>;
template class MaxPool3dGradFunctor<phi::GPUContext, double>;
template class MaxPool3dGradFunctor<phi::GPUContext, dtype::float16>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<float>, float>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<float>, float>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<float>, float>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<float>, float>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<double>, double>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<double>, double>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<double>, double>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<double>, double>;
template class Pool3dFunctor<phi::GPUContext, MaxPool<dtype::float16>, dtype::float16>;
template class Pool3dFunctor<phi::GPUContext, AvgPool<dtype::float16>, dtype::float16>;
template class Pool3dGradFunctor<phi::GPUContext, MaxPoolGrad<dtype::float16>, dtype::float16>;
template class Pool3dGradFunctor<phi::GPUContext, AvgPoolGrad<dtype::float16>, dtype::float16>;
template <typename T1, typename T2> template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx( __global__ void KernelMaxPool2dWithIdx(const int nthreads,
const int nthreads, const T1* input_data, const int channels, const T1* input_data,
const int input_height, const int input_width, const int output_height, const int channels,
const int output_width, const int ksize_height, const int ksize_width, const int input_height,
const int stride_height, const int stride_width, const int padding_height, const int input_width,
const int padding_width, bool adaptive, T1* output_data, T2* mask_data, const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
bool adaptive,
T1* output_data,
T2* mask_data,
FastDivModForPooling divmods) { FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend; int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset; int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>( OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
index, false, divmods, 0, 0, input_width, input_height, &w_offset, false,
&h_offset, &c_offset, &input_offset); divmods,
0,
0,
input_width,
input_height,
&w_offset,
&h_offset,
&c_offset,
&input_offset);
input_data += input_offset; input_data += input_offset;
if (adaptive) { if (adaptive) {
...@@ -1431,20 +1801,38 @@ __global__ void KernelMaxPool2dWithIdx( ...@@ -1431,20 +1801,38 @@ __global__ void KernelMaxPool2dWithIdx(
} }
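The adaptive branch above derives each pooling window from the output coordinate. The helper used for that is not part of this diff; the usual formulation, shown here as an assumption rather than Paddle's exact code, scales the output index by input/output and takes the floor for the start and the ceiling for the end:

#include <cstdio>

int AdaptStart(int i, int input_size, int output_size) {
  return (i * input_size) / output_size;                          // floor
}
int AdaptEnd(int i, int input_size, int output_size) {
  return ((i + 1) * input_size + output_size - 1) / output_size;  // ceil
}

int main() {
  // 7 input rows pooled adaptively down to 3 output rows.
  for (int i = 0; i < 3; ++i) {
    printf("output %d <- input [%d, %d)\n", i, AdaptStart(i, 7, 3), AdaptEnd(i, 7, 3));
  }
  return 0;
}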
template <typename T1, typename T2> template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad( __global__ void KernelMaxPool2DWithIdxGrad(const int nthreads,
const int nthreads, const T1* output_grad, const T2* mask_data, const T1* output_grad,
const int channels, const int input_height, const int input_width, const T2* mask_data,
const int output_height, const int output_width, const int ksize_height, const int channels,
const int ksize_width, const int stride_height, const int stride_width, const int input_height,
const int padding_height, const int padding_width, bool adaptive, const int input_width,
T1* input_grad, FastDivModForPooling divmods) { const int output_height,
const int output_width,
const int ksize_height,
const int ksize_width,
const int stride_height,
const int stride_width,
const int padding_height,
const int padding_width,
bool adaptive,
T1* input_grad,
FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int phstart, phend, pwstart, pwend; int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset; int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>( OffsetPreparationFor4Dimension<FastDivModForPooling>(index,
index, false, divmods, 0, 0, output_width, output_height, &w_offset, false,
&h_offset, &c_offset, &output_offset); divmods,
0,
0,
output_width,
output_height,
&w_offset,
&h_offset,
&c_offset,
&output_offset);
mask_data += output_offset; mask_data += output_offset;
output_grad += output_offset; output_grad += output_offset;
...@@ -1487,13 +1875,16 @@ __global__ void KernelMaxPool2DWithIdxGrad( ...@@ -1487,13 +1875,16 @@ __global__ void KernelMaxPool2DWithIdxGrad(
* height and width, respectively. * height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { class MaxPool2dWithIndexFunctor<phi::GPUContext, T1, T2> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* output, framework::Tensor* mask) { bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
const int input_height = input.dims()[2]; const int input_height = input.dims()[2];
...@@ -1509,13 +1900,13 @@ class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1509,13 +1900,13 @@ class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
const int padding_width = paddings[1]; const int padding_width = paddings[1];
const T1* input_data = input.data<T1>(); const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace()); T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); T2* mask_data = context.template Alloc<T2>(mask);
int nthreads = batch_size * output_channels * output_height * output_width; int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num); paddle::platform::ChangeThreadNum(context, &thread_num);
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
...@@ -1525,10 +1916,23 @@ class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1525,10 +1916,23 @@ class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
auto pool_divmods = auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height); FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width, nthreads,
output_height, output_width, ksize_height, ksize_width, stride_height, input_data,
stride_width, padding_height, padding_width, adaptive, output_data, input_channels,
mask_data, pool_divmods); input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
adaptive,
output_data,
mask_data,
pool_divmods);
} }
}; };
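A CPU reference of the "with index" forward pass, reduced to one dimension: alongside each pooled maximum, the flat position it came from is written into the mask tensor so the backward pass can route gradients directly. Names are illustrative.

#include <cstdio>
#include <vector>

void MaxPool1dWithIndexRef(const std::vector<float>& x, int k, int stride,
                           std::vector<float>* out, std::vector<int>* mask) {
  out->clear();
  mask->clear();
  for (int start = 0; start + k <= static_cast<int>(x.size()); start += stride) {
    int best = start;
    for (int i = start + 1; i < start + k; ++i) {
      if (x[i] > x[best]) best = i;
    }
    out->push_back(x[best]);
    mask->push_back(best);  // remembered argmax: the "mask" tensor above
  }
}

int main() {
  std::vector<float> x = {1, 3, 2, 5, 4, 0};
  std::vector<float> out;
  std::vector<int> mask;
  MaxPool1dWithIndexRef(x, 2, 2, &out, &mask);
  for (size_t i = 0; i < out.size(); ++i) {
    printf("max=%.0f at %d\n", out[i], mask[i]);
  }
  return 0;
}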
...@@ -1538,14 +1942,16 @@ class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1538,14 +1942,16 @@ class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
* height and width, respectively. * height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { class MaxPool2dWithIndexGradFunctor<phi::GPUContext, T1, T2> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize, const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* input_grad) { bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0]; const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1]; const int input_channels = input_grad->dims()[1];
const int input_height = input_grad->dims()[2]; const int input_height = input_grad->dims()[2];
...@@ -1561,7 +1967,7 @@ class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1561,7 +1967,7 @@ class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
const T2* mask_data = mask.data<T2>(); const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>(); const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); T1* input_grad_data = context.template Alloc<T1>(input_grad);
int nthreads = batch_size * input_channels * input_height * input_width; int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024; int blocks = (nthreads + 1024 - 1) / 1024;
...@@ -1571,31 +1977,53 @@ class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1571,31 +1977,53 @@ class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
auto pool_divmods = auto pool_divmods =
FastDivModForPooling(input_channels, input_width, input_height); FastDivModForPooling(input_channels, input_width, input_height);
KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, output_grad_data, mask_data, input_channels, input_height, nthreads,
input_width, output_height, output_width, ksize_height, ksize_width, output_grad_data,
stride_height, stride_width, padding_height, padding_width, adaptive, mask_data,
input_grad_data, pool_divmods); input_channels,
input_height,
input_width,
output_height,
output_width,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding_height,
padding_width,
adaptive,
input_grad_data,
pool_divmods);
} }
}; };
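The gradient functor above uses a gather formulation: one thread per input element sums the gradients of every output whose recorded argmax is that element, so no atomic adds are needed (contrast with the scatter-style max-pool backward earlier in this file). A 1-D CPU sketch of the same idea:

#include <cstdio>
#include <vector>

std::vector<float> MaxPoolWithIndexGradRef(const std::vector<float>& dy,
                                           const std::vector<int>& mask,
                                           int input_len) {
  std::vector<float> dx(input_len, 0.0f);
  for (int i = 0; i < input_len; ++i) {          // "one thread" per input element
    for (size_t o = 0; o < dy.size(); ++o) {
      if (mask[o] == i) dx[i] += dy[o];          // gather instead of scatter
    }
  }
  return dx;
}

int main() {
  std::vector<float> dy = {0.5f, 1.5f, 2.5f};
  std::vector<int> mask = {1, 3, 4};  // argmax positions from the forward pass
  std::vector<float> dx = MaxPoolWithIndexGradRef(dy, mask, 6);
  for (float v : dx) printf("%.1f ", v);
  printf("\n");
  return 0;
}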
// Removed (fluid version):
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>;
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>;

// Added (phi version):
template class MaxPool2dWithIndexFunctor<phi::GPUContext, float, int>;
template class MaxPool2dWithIndexGradFunctor<phi::GPUContext, float, int>;
template class MaxPool2dWithIndexFunctor<phi::GPUContext, double, int>;
template class MaxPool2dWithIndexGradFunctor<phi::GPUContext, double, int>;
template <typename T1, typename T2> template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx( __global__ void KernelMaxPool3DWithIdx(const int nthreads,
const int nthreads, const T1* input_data, const int channels, const T1* input_data,
const int input_depth, const int input_height, const int input_width, const int channels,
const int output_depth, const int output_height, const int output_width, const int input_depth,
const int ksize_depth, const int ksize_height, const int ksize_width, const int input_height,
const int stride_depth, const int stride_height, const int stride_width, const int input_width,
const int padding_depth, const int padding_height, const int padding_width, const int output_depth,
bool adaptive, T1* output_data, T2* mask_data) { const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
bool adaptive,
T1* output_data,
T2* mask_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int pw = index % output_width; int pw = index % output_width;
...@@ -1650,14 +2078,27 @@ __global__ void KernelMaxPool3DWithIdx( ...@@ -1650,14 +2078,27 @@ __global__ void KernelMaxPool3DWithIdx(
} }
template <typename T1, typename T2> template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad( __global__ void KernelMaxPool3DWithIdxGrad(const int nthreads,
const int nthreads, const T1* output_grad, const T2* mask, const T1* output_grad,
const int channels, const int input_depth, const int input_height, const T2* mask,
const int input_width, const int output_depth, const int output_height, const int channels,
const int output_width, const int ksize_depth, const int ksize_height, const int input_depth,
const int ksize_width, const int stride_depth, const int stride_height, const int input_height,
const int stride_width, const int padding_depth, const int padding_height, const int input_width,
const int padding_width, bool adaptive, T1* input_grad) { const int output_depth,
const int output_height,
const int output_width,
const int ksize_depth,
const int ksize_height,
const int ksize_width,
const int stride_depth,
const int stride_height,
const int stride_width,
const int padding_depth,
const int padding_height,
const int padding_width,
bool adaptive,
T1* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) { index += blockDim.x * gridDim.x) {
int w_offset = index % input_width; int w_offset = index % input_width;
...@@ -1727,13 +2168,16 @@ __global__ void KernelMaxPool3DWithIdxGrad( ...@@ -1727,13 +2168,16 @@ __global__ void KernelMaxPool3DWithIdxGrad(
* depth, height and width, respectively. * depth, height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { class MaxPool3dWithIndexFunctor<phi::GPUContext, T1, T2> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& input, const std::vector<int>& ksize, const DenseTensor& input,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* output, framework::Tensor* mask) { bool adaptive,
DenseTensor* output,
DenseTensor* mask) {
const int batch_size = input.dims()[0]; const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1]; const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2]; const int input_depth = input.dims()[2];
...@@ -1754,14 +2198,14 @@ class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1754,14 +2198,14 @@ class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
const int padding_width = paddings[2]; const int padding_width = paddings[2];
const T1* input_data = input.data<T1>(); const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace()); T1* output_data = context.template Alloc<T1>(output);
T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); T2* mask_data = context.template Alloc<T2>(mask);
int nthreads = batch_size * output_channels * output_depth * output_height * int nthreads = batch_size * output_channels * output_depth * output_height *
output_width; output_width;
int thread_num = 1024; int thread_num = 1024;
#ifdef WITH_NV_JETSON #ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num); paddle::platform::ChangeThreadNum(context, &thread_num);
#endif #endif
int blocks = (nthreads + thread_num - 1) / thread_num; int blocks = (nthreads + thread_num - 1) / thread_num;
...@@ -1769,10 +2213,26 @@ class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1769,10 +2213,26 @@ class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height, nthreads,
input_width, output_depth, output_height, output_width, ksize_depth, input_data,
ksize_height, ksize_width, stride_depth, stride_height, stride_width, input_channels,
padding_depth, padding_height, padding_width, adaptive, output_data, input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
adaptive,
output_data,
mask_data); mask_data);
} }
}; };
...@@ -1783,14 +2243,16 @@ class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1783,14 +2243,16 @@ class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
* depth, height and width, respectively. * depth, height and width, respectively.
*/ */
template <typename T1, typename T2> template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { class MaxPool3dWithIndexGradFunctor<phi::GPUContext, T1, T2> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const phi::GPUContext& context,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize, const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* input_grad) { bool adaptive,
DenseTensor* input_grad) {
const int batch_size = input_grad->dims()[0]; const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1]; const int input_channels = input_grad->dims()[1];
const int input_depth = input_grad->dims()[2]; const int input_depth = input_grad->dims()[2];
...@@ -1811,7 +2273,7 @@ class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1811,7 +2273,7 @@ class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
const T1* output_grad_data = output_grad.data<T1>(); const T1* output_grad_data = output_grad.data<T1>();
const T2* mask_data = mask.data<T2>(); const T2* mask_data = mask.data<T2>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); T1* input_grad_data = context.template Alloc<T1>(input_grad);
int nthreads = int nthreads =
batch_size * input_channels * input_depth * input_height * input_width; batch_size * input_channels * input_depth * input_height * input_width;
...@@ -1820,23 +2282,34 @@ class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { ...@@ -1820,23 +2282,34 @@ class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
dim3 grid(blocks, 1); dim3 grid(blocks, 1);
KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, output_grad_data, mask_data, input_channels, input_depth, nthreads,
input_height, input_width, output_depth, output_height, output_width, output_grad_data,
ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, mask_data,
stride_width, padding_depth, padding_height, padding_width, adaptive, input_channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
ksize_depth,
ksize_height,
ksize_width,
stride_depth,
stride_height,
stride_width,
padding_depth,
padding_height,
padding_width,
adaptive,
input_grad_data); input_grad_data);
} }
}; };
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<phi::GPUContext, float, int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<phi::GPUContext, float, int>;
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexFunctor<phi::GPUContext, double, int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<phi::GPUContext, double, int>;
} // namespace math } // namespace funcs
} // namespace operators } // namespace phi
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
...@@ -13,19 +13,21 @@ See the License for the specific language governing permissions and ...@@ -13,19 +13,21 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <algorithm>
#include <string> #include <string>
#include <vector> #include <vector>
#include "paddle/fluid/platform/macros.h" // import FLT_MAX
#include "paddle/fluid/framework/eigen.h" #include "paddle/phi/common/amp_type_traits.h"
#include "paddle/fluid/framework/tensor.h" #include "paddle/phi/core/dense_tensor.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/hostdevice.h"
namespace paddle { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
namespace operators { #include "paddle/phi/backends/gpu/gpu_decls.h"
namespace math { #endif
namespace phi {
namespace funcs {
/* /*
* \brief Extracting simple operations from pooling. * \brief Extracting simple operations from pooling.
...@@ -47,7 +49,7 @@ class MaxPool { ...@@ -47,7 +49,7 @@ class MaxPool {
template <class T> template <class T>
class AvgPool { class AvgPool {
using MT = typename details::MPTypeTrait<T>::Type; using MT = typename dtype::MPTypeTrait<T>::Type;
MT intermediate_res; MT intermediate_res;
public: public:
...@@ -69,8 +71,8 @@ template <class T> ...@@ -69,8 +71,8 @@ template <class T>
class MaxPoolGrad { class MaxPoolGrad {
public: public:
static constexpr bool use_x = true; static constexpr bool use_x = true;
HOSTDEVICE inline void compute(const T& x, const T& y, const T& dy, T scale, HOSTDEVICE inline void compute(
T* dx) { const T& x, const T& y, const T& dy, T scale, T* dx) {
*dx += dy * static_cast<T>(x == y); *dx += dy * static_cast<T>(x == y);
} }
}; };
...@@ -79,8 +81,8 @@ template <class T> ...@@ -79,8 +81,8 @@ template <class T>
class AvgPoolGrad { class AvgPoolGrad {
public: public:
static constexpr bool use_x = false; static constexpr bool use_x = false;
HOSTDEVICE inline void compute(const T& x, const T& y, const T& dy, T scale, HOSTDEVICE inline void compute(
T* dx) { const T& x, const T& y, const T& dy, T scale, T* dx) {
*dx += (scale * dy); *dx += (scale * dy);
} }
}; };
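// Illustration (standalone, not part of this patch): MaxPoolGrad routes the
// incoming gradient only to the input element that produced the max, while
// AvgPoolGrad spreads it with a 1 / window_size scale. A minimal sketch of
// the same rules using plain doubles instead of phi types:
#include <cassert>

// Max pooling: dx receives dy only where the input equals the pooled output.
void max_pool_grad(double x, double y, double dy, double* dx) {
  *dx += dy * static_cast<double>(x == y);
}

// Average pooling: dy is spread uniformly; scale is 1 / window_size.
void avg_pool_grad(double dy, double scale, double* dx) { *dx += scale * dy; }

int main() {
  // Window {1, 3}: the max output is 3, so only the second input gets dy.
  double dx0 = 0.0, dx1 = 0.0;
  max_pool_grad(1.0, 3.0, 0.5, &dx0);
  max_pool_grad(3.0, 3.0, 0.5, &dx1);
  assert(dx0 == 0.0 && dx1 == 0.5);
  // Average over a window of 2: each input receives dy / 2.
  double da = 0.0, db = 0.0;
  avg_pool_grad(0.5, 0.5, &da);
  avg_pool_grad(0.5, 0.5, &db);
  assert(da == 0.25 && db == 0.25);
  return 0;
}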
...@@ -116,149 +118,191 @@ HOSTDEVICE inline int AdaptEndIndex(int ph, int input_size, int output_size) { ...@@ -116,149 +118,191 @@ HOSTDEVICE inline int AdaptEndIndex(int ph, int input_size, int output_size) {
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
class Pool2dDirectCUDAFunctor { class Pool2dDirectCUDAFunctor {
public: public:
void operator()(const T* input, const std::vector<int>& input_shape, void operator()(const T* input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& output_shape,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, T* output, gpuStream_t stream, bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute); PoolProcess pool_compute);
}; };
#endif #endif
template <typename DeviceContext, typename PoolProcess, typename T> template <typename Context, typename PoolProcess, typename T>
class Pool2dFunctor { class Pool2dFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const DenseTensor& input,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* output, bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_compute); PoolProcess pool_compute);
// overload operator() to support argument data_format // overload operator() to support argument data_format
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const DenseTensor& input,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* output, PoolProcess pool_compute); bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_compute);
}; };
template <typename DeviceContext, typename PoolProcess, typename T> template <typename Context, typename PoolProcess, typename T>
class Pool2dGradFunctor { class Pool2dGradFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* input_grad, bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_compute); PoolProcess pool_compute);
// overload operator() to support argument data_format // overload operator() to support argument data_format
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* input_grad, PoolProcess pool_compute); bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_compute);
}; };
template <typename DeviceContext, class T> template <typename Context, class T>
class MaxPool2dGradFunctor { class MaxPool2dGradFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
framework::Tensor* input_grad); DenseTensor* input_grad);
// overload operator() to support argument data_format // overload operator() to support argument data_format
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, framework::Tensor* input_grad); const std::string data_format,
DenseTensor* input_grad);
}; };
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
template <typename PoolProcess, typename T> template <typename PoolProcess, typename T>
class Pool3dDirectCUDAFunctor { class Pool3dDirectCUDAFunctor {
public: public:
void operator()(const T* input, const std::vector<int>& input_shape, void operator()(const T* input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& output_shape,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, T* output, gpuStream_t stream, bool exclusive,
bool adaptive,
T* output,
gpuStream_t stream,
PoolProcess pool_compute); PoolProcess pool_compute);
}; };
#endif #endif
template <typename DeviceContext, typename PoolProcess, typename T> template <typename Context, typename PoolProcess, typename T>
class Pool3dFunctor { class Pool3dFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const DenseTensor& input,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* output, bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_compute); PoolProcess pool_compute);
// overload operator() to support argument data_format // overload operator() to support argument data_format
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const DenseTensor& input,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* output, PoolProcess pool_compute); bool exclusive,
bool adaptive,
DenseTensor* output,
PoolProcess pool_compute);
}; };
template <typename DeviceContext, typename PoolProcess, typename T> template <typename Context, typename PoolProcess, typename T>
class Pool3dGradFunctor { class Pool3dGradFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive, const std::vector<int>& paddings,
bool adaptive, framework::Tensor* input_grad, bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_compute); PoolProcess pool_compute);
// overload operator() to support argument data_format // overload operator() to support argument data_format
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive, const std::string data_format,
framework::Tensor* input_grad, PoolProcess pool_compute); bool exclusive,
bool adaptive,
DenseTensor* input_grad,
PoolProcess pool_compute);
}; };
template <typename DeviceContext, class T> template <typename Context, class T>
class MaxPool3dGradFunctor { class MaxPool3dGradFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
framework::Tensor* input_grad); DenseTensor* input_grad);
// overload operator() to support argument data_format // overload operator() to support argument data_format
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const framework::Tensor& output, const DenseTensor& input,
const framework::Tensor& output_grad, const DenseTensor& output,
const DenseTensor& output_grad,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& paddings,
const std::string data_format, framework::Tensor* input_grad); const std::string data_format,
DenseTensor* input_grad);
}; };
/* /*
...@@ -268,48 +312,158 @@ class MaxPool3dGradFunctor { ...@@ -268,48 +312,158 @@ class MaxPool3dGradFunctor {
* In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
* NCDHW format. * NCDHW format.
*/ */
template <typename DeviceContext, typename T1, typename T2> template <typename Context, typename T1, typename T2>
class MaxPool2dWithIndexFunctor { class MaxPool2dWithIndexFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const DenseTensor& input,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* output, framework::Tensor* mask); bool adaptive,
DenseTensor* output,
DenseTensor* mask);
}; };
template <typename DeviceContext, typename T1, typename T2> template <typename Context, typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor { class MaxPool2dWithIndexGradFunctor {
public: public:
void operator()(const DeviceContext& context, void operator()(const Context& context,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize, const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* input_grad); bool adaptive,
DenseTensor* input_grad);
}; };
template <typename DeviceContext, typename T1, typename T2> template <typename Context, typename T1, typename T2>
class MaxPool3dWithIndexFunctor { class MaxPool3dWithIndexFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const Context& context,
const DenseTensor& input,
const std::vector<int>& ksize, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* output, framework::Tensor* mask); bool adaptive,
DenseTensor* output,
DenseTensor* mask);
}; };
template <typename DeviceContext, typename T1, typename T2> template <typename Context, typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor { class MaxPool3dWithIndexGradFunctor {
public: public:
void operator()(const DeviceContext& context, void operator()(const Context& context,
const framework::Tensor& output_grad, const DenseTensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize, const DenseTensor& mask,
const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive, const std::vector<int>& paddings,
framework::Tensor* input_grad); bool adaptive,
DenseTensor* input_grad);
}; };
} // namespace math inline int PoolOutputSize(int input_size,
} // namespace operators int filter_size,
} // namespace paddle int padding_1,
int padding_2,
int stride,
bool ceil_mode) {
int output_size;
if (!ceil_mode) {
output_size =
(input_size - filter_size + padding_1 + padding_2) / stride + 1;
} else {
output_size =
(input_size - filter_size + padding_1 + padding_2 + stride - 1) /
stride +
1;
}
PADDLE_ENFORCE_GT(
output_size,
0,
errors::InvalidArgument(
"the output size must be greater than 0. But received: "
"output_size = %d due to the settings of input_size(%d), "
"padding(%d,%d), "
"k_size(%d) and stride(%d). Please check again!",
output_size,
input_size,
padding_1,
padding_2,
filter_size,
stride));
return output_size;
}
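// Illustration (standalone, not part of this patch): the branch above is the
// usual floor / ceil output-size formula. One case where the two modes differ:
#include <cassert>

int pool_output_size(int input, int filter, int pad_1, int pad_2, int stride,
                     bool ceil_mode) {
  return ceil_mode
             ? (input - filter + pad_1 + pad_2 + stride - 1) / stride + 1
             : (input - filter + pad_1 + pad_2) / stride + 1;
}

int main() {
  // 6-wide input, kernel 3, stride 2, no padding:
  // floor mode -> (6 - 3) / 2 + 1 = 2; ceil mode also covers the tail -> 3.
  assert(pool_output_size(6, 3, 0, 0, 2, /*ceil_mode=*/false) == 2);
  assert(pool_output_size(6, 3, 0, 0, 2, /*ceil_mode=*/true) == 3);
  return 0;
}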
inline int MaxPoolOutputSize(int input_size,
int filter_size,
int padding,
int stride) {
int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
return output_size;
}
template <typename T = int>
inline void UpdatePadding(std::vector<T>* paddings,
const bool global_pooling,
const bool adaptive,
const std::string padding_algorithm,
const DDim data_dims,
const std::vector<T>& strides,
const std::vector<T>& kernel_size) {
// set padding size == data_dims.size() * 2
auto data_shape = vectorize<T>(data_dims);
if (static_cast<int>(paddings->size()) == data_dims.size()) {
for (int i = 0; i < data_dims.size(); ++i) {
T copy_pad = *(paddings->begin() + 2 * i);
paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
}
} else {
PADDLE_ENFORCE_EQ(data_dims.size() * 2,
paddings->size(),
errors::InvalidArgument(
"Paddings size %d should be the same or twice as the "
"pooling size %d.",
paddings->size(),
data_dims.size() * 2));
}
// when padding_algorithm is "VALID" or "SAME"
if (padding_algorithm == "SAME") {
for (int i = 0; i < data_dims.size(); ++i) {
T out_size = (data_dims[i] + strides[i] - 1) / strides[i];
T pad_sum =
std::max((out_size - 1) * strides[i] + kernel_size[i] - data_shape[i],
static_cast<T>(0));
T pad_0 = pad_sum / 2;
T pad_1 = pad_sum - pad_0;
*(paddings->begin() + i * 2) = pad_0;
*(paddings->begin() + i * 2 + 1) = pad_1;
}
} else if (padding_algorithm == "VALID") {
for (auto it = paddings->begin(); it != paddings->end(); it++) {
*it = 0;
}
}
// if global_pooling == true or adaptive == true, padding will be ignore
if (global_pooling || adaptive) {
for (auto it = paddings->begin(); it != paddings->end(); it++) {
*it = 0;
}
}
}
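// Illustration (standalone, not part of this patch): in the "SAME" branch the
// total padding per dimension is chosen so that out_size == ceil(in / stride),
// and an odd total is split with the extra unit on the trailing side. The
// helper name same_padding_1d below is made up for this sketch:
#include <algorithm>
#include <cassert>

void same_padding_1d(int in, int stride, int ksize, int* pad_0, int* pad_1) {
  int out = (in + stride - 1) / stride;  // ceil(in / stride)
  int pad_sum = std::max((out - 1) * stride + ksize - in, 0);
  *pad_0 = pad_sum / 2;
  *pad_1 = pad_sum - *pad_0;
}

int main() {
  int pad_0 = 0, pad_1 = 0;
  same_padding_1d(7, 2, 3, &pad_0, &pad_1);  // out 4, pad_sum 2 -> 1 / 1
  assert(pad_0 == 1 && pad_1 == 1);
  same_padding_1d(8, 2, 3, &pad_0, &pad_1);  // out 4, pad_sum 1 -> 0 / 1
  assert(pad_0 == 0 && pad_1 == 1);
  return 0;
}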
template <typename T = int>
inline void UpdateKernelSize(std::vector<T>* kernel_size,
const DDim data_dims) {
kernel_size->resize(static_cast<size_t>(data_dims.size()));
for (size_t i = 0; i < kernel_size->size(); ++i) {
*(kernel_size->begin() + i) = static_cast<T>(data_dims[i]);
}
}
} // namespace funcs
} // namespace phi
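// Illustration (standalone, not part of this header): a CPU reference for what
// Pool2dFunctor with AvgPool computes on one NCHW feature map, showing the
// exclusive flag. Exclusive divides each window sum by the number of valid
// (unpadded) elements; inclusive divides by ksize * ksize.
#include <cassert>
#include <vector>

std::vector<float> avg_pool2d(const std::vector<float>& in, int H, int W,
                              int ksize, int stride, int pad, bool exclusive) {
  const int OH = (H - ksize + 2 * pad) / stride + 1;
  const int OW = (W - ksize + 2 * pad) / stride + 1;
  std::vector<float> out(OH * OW, 0.0f);
  for (int oh = 0; oh < OH; ++oh) {
    for (int ow = 0; ow < OW; ++ow) {
      float sum = 0.0f;
      int valid = 0;
      for (int kh = 0; kh < ksize; ++kh) {
        for (int kw = 0; kw < ksize; ++kw) {
          const int ih = oh * stride - pad + kh;
          const int iw = ow * stride - pad + kw;
          if (ih < 0 || ih >= H || iw < 0 || iw >= W) continue;  // in padding
          sum += in[ih * W + iw];
          ++valid;
        }
      }
      out[oh * OW + ow] =
          sum / static_cast<float>(exclusive ? valid : ksize * ksize);
    }
  }
  return out;
}

int main() {
  // 2x2 map of ones, 2x2 window, stride 2, padding 1: each window sees one value.
  const std::vector<float> ones(4, 1.0f);
  assert(avg_pool2d(ones, 2, 2, 2, 2, 1, /*exclusive=*/true)[0] == 1.0f);
  assert(avg_pool2d(ones, 2, 2, 2, 2, 1, /*exclusive=*/false)[0] == 0.25f);
  return 0;
}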
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pool_grad_kernel.h"
#include "paddle/phi/kernels/impl/pool_grad_kernel_impl.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(pool2d_grad,
GPU,
ALL_LAYOUT,
phi::Pool2dGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(pool2d_double_grad,
GPU,
ALL_LAYOUT,
phi::Pool2dDoubleGradKernel,
float,
double) {}
PD_REGISTER_KERNEL(max_pool2d_with_index_grad,
GPU,
ALL_LAYOUT,
phi::MaxPool2dWithIndexGradKernel,
float,
double) {
kernel->InputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
PD_REGISTER_KERNEL(pool3d_grad,
GPU,
ALL_LAYOUT,
phi::Pool3dGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(max_pool3d_with_index_grad,
GPU,
ALL_LAYOUT,
phi::MaxPool3dWithIndexGradKernel,
float,
double) {
kernel->InputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/pool_kernel.h"
#include "paddle/phi/kernels/impl/pool_kernel_impl.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(pool2d,
GPU,
ALL_LAYOUT,
phi::Pool2dKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(max_pool2d_with_index,
GPU,
ALL_LAYOUT,
phi::MaxPool2dWithIndexKernel,
float,
double) {
kernel->OutputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
PD_REGISTER_KERNEL(pool3d,
GPU,
ALL_LAYOUT,
phi::Pool3dKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(max_pool3d_with_index,
GPU,
ALL_LAYOUT,
phi::MaxPool3dWithIndexKernel,
float,
double) {
kernel->OutputAt(1).SetDataType(
paddle::experimental::CppTypeToDataType<int>::Type());
}
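// Illustration (standalone, not part of this patch): PD_REGISTER_KERNEL relies
// on static registration. The real phi registry is far more elaborate; none of
// the names below exist in Paddle, this only sketches the general pattern.
#include <functional>
#include <iostream>
#include <map>
#include <string>

using KernelFn = std::function<void()>;

std::map<std::string, KernelFn>& Registry() {
  static std::map<std::string, KernelFn> registry;
  return registry;
}

struct KernelRegistrar {
  KernelRegistrar(const std::string& key, KernelFn fn) {
    Registry()[key] = std::move(fn);  // runs before main() via static init
  }
};

void Pool2dKernelStub() { std::cout << "pool2d on GPU\n"; }

// Rough analogue of PD_REGISTER_KERNEL(pool2d, GPU, ...).
static KernelRegistrar pool2d_gpu("pool2d/GPU", Pool2dKernelStub);

int main() {
  Registry().at("pool2d/GPU")();  // framework looks up (op, backend) and calls
  return 0;
}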
...@@ -37,7 +37,7 @@ void SplitKernel(const Context& dev_ctx, ...@@ -37,7 +37,7 @@ void SplitKernel(const Context& dev_ctx,
out_metas_ptr.push_back(&out_metas.back()); out_metas_ptr.push_back(&out_metas.back());
} }
phi::SplitInferMeta(x, num_or_sections, axis_scalar, out_metas_ptr, true); phi::SplitInferMeta(x, num_or_sections, axis_scalar, out_metas_ptr);
for (size_t i = 0; i < out_metas.size(); ++i) { for (size_t i = 0; i < out_metas.size(); ++i) {
outs[i]->Resize(out_metas[i].dims()); outs[i]->Resize(out_metas[i].dims());
......
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
namespace phi {
using GPUDNNDataLayout = paddle::platform::DataLayout;
using PoolingMode = paddle::platform::PoolingMode;
using ScopedPoolingDescriptor = paddle::platform::ScopedPoolingDescriptor;
using ScopedTensorDescriptor = paddle::platform::ScopedTensorDescriptor;
template <typename T>
using ScalingParamType =
typename paddle::platform::CudnnDataType<T>::ScalingParamType;
inline GPUDNNDataLayout GetLayoutFromStr(std::string data_format) {
if (data_format == "NHWC") {
return GPUDNNDataLayout::kNHWC;
} else if (data_format == "NCHW") {
return GPUDNNDataLayout::kNCHW;
} else if (data_format == "NCDHW") {
return GPUDNNDataLayout::kNCDHW;
} else {
return GPUDNNDataLayout::kNCDHW;
}
}
} // namespace phi
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/pool_grad_kernel.h"
#include "paddle/phi/kernels/gpudnn/pool_gpudnn.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/pool_kernel.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/phi/kernels/impl/pool_grad_kernel_impl.h" // PoolGradRawGPUDNNKernel will call PoolGradRawKernel for pooling type "max" in ROCm
#endif
namespace phi {
template <typename T, typename Context>
void PoolGradRawGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
PADDLE_ENFORCE_EQ(
paddle::platform::is_gpu_place(ctx.GetPlace()),
true,
errors::InvalidArgument("Pool operator CUDA kernel must use CUDAPlace "
"rather than CPUPlace."));
const DenseTensor* input = &x;
const DenseTensor* output = &out;
const DenseTensor* output_grad = &dout;
DenseTensor* input_grad = dx;
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
#ifdef PADDLE_WITH_HIP
if (pooling_type == "max") {
PoolGradRawKernel<T, GPUContext>(ctx,
x,
out,
dout,
kernel_size,
strides,
paddings_,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
dx);
return;
}
#endif
// update paddings
auto in_x_dims = input->dims();
DDim data_dims;
if (channel_last) {
data_dims = slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
} else {
data_dims = slice_ddim(in_x_dims, 2, in_x_dims.size());
}
funcs::UpdatePadding(&paddings_,
global_pooling,
adaptive,
padding_algorithm,
data_dims,
strides,
kernel_size_);
if (data_dims.size() * 2 == static_cast<int>(paddings_.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings_.erase(paddings_.begin() + i + 1);
}
}
if (global_pooling) {
funcs::UpdateKernelSize(&kernel_size_, data_dims);
}
// ------- tensor grad --------------
DenseTensor transformed_input(input->type());
DenseTensor transformed_output(output->type());
DenseTensor transformed_output_grad(output_grad->type());
ctx.template Alloc<T>(input_grad);
DenseTensor transformed_input_grad(input_grad->type());
GPUDNNDataLayout layout;
const std::string str_NCHW = "NCHW", str_NHWC = "NHWC";
const std::string str_NCDHW = "NCDHW", str_NDHWC = "NDHWC";
if (data_format == str_NDHWC) {
layout = GPUDNNDataLayout::kNCDHW;
std::vector<int> axis{0, 4, 1, 2, 3};
// input
transformed_input.Resize(input->dims());
auto in_dims_vec = vectorize(input->dims());
in_dims_vec[1] = input->dims()[4];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
in_dims_vec[4] = input->dims()[3];
transformed_input.Resize(make_ddim(in_dims_vec));
ctx.Alloc(&transformed_input, input->type());
funcs::Transpose<Context, T, 5> trans5;
trans5(ctx, *input, &transformed_input, axis);
// output
transformed_output.Resize(output->dims());
auto out_dims_vec = vectorize(output->dims());
out_dims_vec[1] = output->dims()[4];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
out_dims_vec[4] = output->dims()[3];
transformed_output.Resize(make_ddim(out_dims_vec));
ctx.Alloc(&transformed_output, output->type());
funcs::Transpose<Context, T, 5> trans5_v2;
trans5_v2(ctx, *output, &transformed_output, axis);
// output grad
transformed_output_grad.Resize(make_ddim(out_dims_vec));
ctx.Alloc(&transformed_output_grad, output_grad->type());
funcs::Transpose<Context, T, 5> trans5_v3;
trans5_v3(ctx, *output_grad, &transformed_output_grad, axis);
// input grad
transformed_input_grad.Resize(make_ddim(in_dims_vec));
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
} else if (data_format == str_NHWC) {
layout = GPUDNNDataLayout::kNCHW;
std::vector<int> axis{0, 3, 1, 2};
// input
transformed_input.Resize(input->dims());
auto in_dims_vec = vectorize(input->dims());
in_dims_vec[1] = input->dims()[3];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
transformed_input.Resize(make_ddim(in_dims_vec));
ctx.Alloc(&transformed_input, input->type());
funcs::Transpose<Context, T, 4> trans4;
trans4(ctx, *input, &transformed_input, axis);
// output
transformed_output.Resize(output->dims());
auto out_dims_vec = vectorize(output->dims());
out_dims_vec[1] = output->dims()[3];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
transformed_output.Resize(make_ddim(out_dims_vec));
ctx.Alloc(&transformed_output, output->type());
funcs::Transpose<Context, T, 4> trans4_v2;
trans4_v2(ctx, *output, &transformed_output, axis);
// output grad
transformed_output_grad.Resize(make_ddim(out_dims_vec));
ctx.Alloc(&transformed_output_grad, output_grad->type());
funcs::Transpose<Context, T, 4> trans4_v3;
trans4_v3(ctx, *output_grad, &transformed_output_grad, axis);
// input grad
transformed_input_grad.Resize(make_ddim(in_dims_vec));
#endif
} else {
layout = GetLayoutFromStr(data_format);
transformed_input = *input;
transformed_output = *output;
transformed_output_grad = *output_grad;
transformed_input_grad = *input_grad;
}
const T* input_data = transformed_input.data<T>();
const T* output_data = transformed_output.data<T>();
const T* output_grad_data = transformed_output_grad.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedPoolingDescriptor pool_desc;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, vectorize<int>(transformed_input.dims()));
miopenTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, vectorize<int>(transformed_output.dims()));
#else
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, vectorize<int>(transformed_output.dims()));
#endif
PoolingMode pooling_mode;
if (pooling_type == "max") {
if (FLAGS_cudnn_deterministic) {
pooling_mode = PoolingMode::kMaximumDeterministic;
} else {
pooling_mode = PoolingMode::kMaximum;
}
} else {
pooling_mode = exclusive ? PoolingMode::kAverageExclusive
: PoolingMode::kAverageInclusive;
}
#ifdef PADDLE_WITH_HIP
miopenPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, kernel_size_, paddings_, strides);
#else
cudnnPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, kernel_size_, paddings_, strides);
#endif
// ------------------- cudnn pool algorithm ---------------------
auto handle = ctx.cudnn_handle();
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
if (input_grad) {
T* input_grad_data = ctx.template Alloc<T>(&transformed_input_grad);
// Because beta is zero, it is unnecessary to reset input_grad.
#ifdef PADDLE_WITH_HIP
char* pool_workspace;
size_t pool_worksize = 0;
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenPoolingGetWorkSpaceSizeV2(
cudnn_pool_desc, cudnn_output_desc, &pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&pool_workspace, pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenPoolingBackward(handle,
cudnn_pool_desc,
&alpha,
cudnn_output_desc,
output_data,
cudnn_output_desc,
output_grad_data,
cudnn_input_desc,
input_data,
&beta,
cudnn_input_desc,
input_grad_data,
pool_workspace));
PADDLE_ENFORCE_GPU_SUCCESS(hipFree(pool_workspace));
#else
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnPoolingBackward(handle,
cudnn_pool_desc,
&alpha,
cudnn_output_desc,
output_data,
cudnn_output_desc,
output_grad_data,
cudnn_input_desc,
input_data,
&beta,
cudnn_input_desc,
input_grad_data));
#endif
if (data_format == str_NDHWC) {
std::vector<int> axis{0, 2, 3, 4, 1};
funcs::Transpose<Context, T, 5> trans5_v4;
trans5_v4(ctx, transformed_input_grad, input_grad, axis);
}
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
if (data_format == str_NHWC) {
std::vector<int> axis{0, 2, 3, 1};
funcs::Transpose<Context, T, 4> trans4_v4;
trans4_v4(ctx, transformed_input_grad, input_grad, axis);
}
#endif
}
}
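// Illustration (standalone, not part of this patch): the cuDNN/MIOpen calls in
// this kernel follow the y = alpha * op(x) + beta * y blending convention,
// which is why input_grad needs no reset when beta == 0.
#include <cassert>

double blend(double alpha, double beta, double op_result, double dst) {
  return alpha * op_result + beta * dst;
}

int main() {
  double stale = 123.0;                        // whatever was in the buffer
  assert(blend(1.0, 0.0, 4.0, stale) == 4.0);  // beta = 0 discards old content
  assert(blend(1.0, 1.0, 4.0, 1.0) == 5.0);    // beta = 1 would accumulate
  return 0;
}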
template <typename T, typename Context>
void Pool2dGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
PoolGradRawGPUDNNKernel<T, Context>(ctx,
x,
out,
dout,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
dx);
}
template <typename T, typename Context>
void Pool2dDoubleGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
if (pooling_type == "max") {
PADDLE_THROW(
errors::InvalidArgument("Pool op grad grad only supports avgpool."));
} else {
Pool2dGPUDNNKernel<T, Context>(ctx,
x,
kernel_size,
strides,
paddings,
ceil_mode,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
out);
}
}
template <typename T, typename Context>
void Pool3dGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
PoolGradRawGPUDNNKernel<T, Context>(ctx,
x,
out,
dout,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
dx);
}
} // namespace phi
using phi::dtype::float16;
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
PD_REGISTER_KERNEL(pool2d_grad,
GPUDNN,
ALL_LAYOUT,
phi::Pool2dGradGPUDNNKernel,
float,
float16) {}
PD_REGISTER_KERNEL(pool2d_double_grad,
GPUDNN,
ALL_LAYOUT,
phi::Pool2dDoubleGradGPUDNNKernel,
float,
float16) {}
PD_REGISTER_KERNEL(pool3d_grad,
GPUDNN,
ALL_LAYOUT,
phi::Pool3dGradGPUDNNKernel,
float,
float16) {}
#else
PD_REGISTER_KERNEL(pool2d_grad,
GPUDNN,
ALL_LAYOUT,
phi::Pool2dGradGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(pool2d_double_grad,
GPUDNN,
ALL_LAYOUT,
phi::Pool2dDoubleGradGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(pool3d_grad,
GPUDNN,
ALL_LAYOUT,
phi::Pool3dGradGPUDNNKernel,
float,
double,
float16) {}
#endif
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/pool_kernel.h"
#include "paddle/phi/kernels/gpudnn/pool_gpudnn.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/pooling.h"
namespace phi {
template <typename T, typename Context>
void PoolRawGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
paddle::platform::is_gpu_place(ctx.GetPlace()),
true,
errors::InvalidArgument("Pool operator CUDA kernel must use CUDAPlace "
"rather than CPUPlace."));
const DenseTensor* input = &x;
DenseTensor* output = out;
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
ctx.template Alloc<T>(output);
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// update paddings_
auto x_dims = input->dims();
DDim data_dims;
if (channel_last) {
data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
} else {
data_dims = slice_ddim(x_dims, 2, x_dims.size());
}
funcs::UpdatePadding(&paddings_,
global_pooling,
adaptive,
padding_algorithm,
data_dims,
strides,
kernel_size_);
if (data_dims.size() * 2 == static_cast<int>(paddings_.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings_.erase(paddings_.begin() + i + 1);
}
}
if (global_pooling) {
funcs::UpdateKernelSize(&kernel_size_, data_dims);
}
const std::string str_NCHW = "NCHW", str_NHWC = "NHWC";
const std::string str_NCDHW = "NCDHW", str_NDHWC = "NDHWC";
// -----------------transformed tensor ------------------------
DenseTensor transformed_input(input->type());
DenseTensor transformed_output(output->type());
GPUDNNDataLayout layout;
if (data_format == str_NDHWC) {
layout = GPUDNNDataLayout::kNCDHW;
std::vector<int> axis{0, 4, 1, 2, 3};
// input
transformed_input.Resize(input->dims());
auto in_dims_vec = vectorize(input->dims());
in_dims_vec[1] = input->dims()[4];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
in_dims_vec[4] = input->dims()[3];
transformed_input.Resize(make_ddim(in_dims_vec));
ctx.Alloc(&transformed_input, input->type());
funcs::Transpose<Context, T, 5> trans5;
trans5(ctx, *input, &transformed_input, axis);
// output
transformed_output.Resize(output->dims());
auto out_dims_vec = vectorize(output->dims());
out_dims_vec[1] = output->dims()[4];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
out_dims_vec[4] = output->dims()[3];
transformed_output.Resize(make_ddim(out_dims_vec));
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
} else if (data_format == str_NHWC) {
layout = GPUDNNDataLayout::kNCHW;
std::vector<int> axis{0, 3, 1, 2};
transformed_input.Resize(input->dims());
auto in_dims_vec = vectorize(input->dims());
in_dims_vec[1] = input->dims()[3];
in_dims_vec[2] = input->dims()[1];
in_dims_vec[3] = input->dims()[2];
transformed_input.Resize(make_ddim(in_dims_vec));
ctx.Alloc(&transformed_input, input->type());
funcs::Transpose<Context, T, 4> trans;
trans(ctx, *input, &transformed_input, axis);
transformed_output.Resize(output->dims());
auto out_dims_vec = vectorize(output->dims());
out_dims_vec[1] = output->dims()[3];
out_dims_vec[2] = output->dims()[1];
out_dims_vec[3] = output->dims()[2];
transformed_output.Resize(make_ddim(out_dims_vec));
#endif
} else {
layout = GetLayoutFromStr(data_format);
transformed_input = *input;
transformed_output = *output;
}
const T* transformed_input_data = transformed_input.data<T>();
T* transformed_output_data = ctx.template Alloc<T>(&transformed_output);
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedPoolingDescriptor pool_desc;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, vectorize<int>(transformed_input.dims()));
miopenTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, vectorize<int>(transformed_output.dims()));
#else
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, vectorize<int>(transformed_output.dims()));
#endif
PoolingMode pooling_mode;
if (pooling_type == "max") {
pooling_mode = PoolingMode::kMaximum;
} else {
pooling_mode = exclusive ? PoolingMode::kAverageExclusive
: PoolingMode::kAverageInclusive;
}
#ifdef PADDLE_WITH_HIP
miopenPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, kernel_size_, paddings_, strides);
#else
cudnnPoolingDescriptor_t cudnn_pool_desc =
pool_desc.descriptor(pooling_mode, kernel_size_, paddings_, strides);
#endif
// ------------------- cudnn pool algorithm ---------------------
auto handle = ctx.cudnn_handle();
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
#ifdef PADDLE_WITH_HIP
char* pool_workspace;
size_t pool_worksize = 0;
PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenPoolingGetWorkSpaceSizeV2(
    cudnn_pool_desc, cudnn_output_desc, &pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&pool_workspace, pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::miopenPoolingForward(handle,
cudnn_pool_desc,
&alpha,
cudnn_input_desc,
transformed_input_data,
&beta,
cudnn_output_desc,
transformed_output_data,
false,
pool_workspace,
pool_worksize));
PADDLE_ENFORCE_GPU_SUCCESS(hipFree(pool_workspace));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cudnnPoolingForward(handle,
cudnn_pool_desc,
&alpha,
cudnn_input_desc,
transformed_input_data,
&beta,
cudnn_output_desc,
transformed_output_data));
#endif
// transpose the pooled result back to the original layout when needed
if (data_format == str_NDHWC) {
std::vector<int> axis{0, 2, 3, 4, 1};
funcs::Transpose<Context, T, 5> trans5_v2;
trans5_v2(ctx, transformed_output, output, axis);
}
#ifdef PADDLE_WITH_HIP
// MIOPEN not support NHWC data layout
if (data_format == str_NHWC) {
std::vector<int> axis{0, 2, 3, 1};
funcs::Transpose<Context, T, 4> trans;
trans(ctx, transformed_output, output, axis);
}
#endif
}
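// Illustration (standalone, not part of this patch): cuDNN prefers
// channel-first layouts and MIOpen has no NHWC support, so the kernels above
// transpose NHWC/NDHWC inputs before pooling. Index arithmetic for the 4-D
// case with axis order {0, 3, 1, 2}; nhwc_to_nchw is not a Paddle function.
#include <cassert>
#include <vector>

std::vector<float> nhwc_to_nchw(const std::vector<float>& src, int n, int h,
                                int w, int c) {
  std::vector<float> dst(src.size());
  for (int in = 0; in < n; ++in)
    for (int ih = 0; ih < h; ++ih)
      for (int iw = 0; iw < w; ++iw)
        for (int ic = 0; ic < c; ++ic)
          dst[((in * c + ic) * h + ih) * w + iw] =
              src[((in * h + ih) * w + iw) * c + ic];
  return dst;
}

int main() {
  // 1x2x2x2 NHWC tensor holding 0..7.
  const std::vector<float> nhwc = {0, 1, 2, 3, 4, 5, 6, 7};
  const std::vector<float> nchw = nhwc_to_nchw(nhwc, 1, 2, 2, 2);
  // Channel 0 plane becomes {0, 2, 4, 6}; channel 1 plane becomes {1, 3, 5, 7}.
  assert(nchw[0] == 0 && nchw[1] == 2 && nchw[2] == 4 && nchw[3] == 6);
  assert(nchw[4] == 1 && nchw[5] == 3 && nchw[6] == 5 && nchw[7] == 7);
  return 0;
}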
template <typename T, typename Context>
void Pool2dGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
PoolRawGPUDNNKernel<T, Context>(ctx,
x,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
out);
}
template <typename T, typename Context>
void Pool3dGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
PoolRawGPUDNNKernel<T, Context>(ctx,
x,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
out);
}
} // namespace phi
using phi::dtype::float16;
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
PD_REGISTER_KERNEL(
pool2d, GPUDNN, ALL_LAYOUT, phi::Pool2dGPUDNNKernel, float, float16) {}
PD_REGISTER_KERNEL(
pool3d, GPUDNN, ALL_LAYOUT, phi::Pool3dGPUDNNKernel, float, float16) {}
#else
PD_REGISTER_KERNEL(pool2d,
GPUDNN,
ALL_LAYOUT,
phi::Pool2dGPUDNNKernel,
float,
double,
float16) {}
PD_REGISTER_KERNEL(pool3d,
GPUDNN,
ALL_LAYOUT,
phi::Pool3dGPUDNNKernel,
float,
double,
float16) {}
#endif
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/phi/kernels/pool_grad_kernel.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#include "paddle/phi/kernels/pool_kernel.h"
namespace phi {
template <typename T, typename Context>
void PoolGradRawKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
// update paddings
auto x_dims = x.dims();
DDim data_dims;
if (channel_last) {
data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
} else {
data_dims = slice_ddim(x_dims, 2, x_dims.size());
}
funcs::UpdatePadding(&paddings_,
global_pooling,
adaptive,
padding_algorithm,
data_dims,
strides,
kernel_size_);
if (data_dims.size() * 2 == static_cast<int>(paddings_.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings_.erase(paddings_.begin() + i + 1);
}
}
if (global_pooling) {
funcs::UpdateKernelSize(&kernel_size_, data_dims);
}
if (dx) {
ctx.template Alloc<T>(dx);
funcs::SetConstant<Context, T> set_constant;
set_constant(ctx, dx, static_cast<T>(0.0));
switch (kernel_size_.size()) {
case 2: {
if (pooling_type == "max") {
funcs::MaxPool2dGradFunctor<Context, T> pool2d_backward;
pool2d_backward(ctx,
x,
out,
dout,
kernel_size_,
strides,
paddings_,
data_format,
dx);
} else if (pooling_type == "avg") {
funcs::Pool2dGradFunctor<Context, funcs::AvgPoolGrad<T>, T>
pool2d_backward;
funcs::AvgPoolGrad<T> pool_process;
pool2d_backward(ctx,
x,
out,
dout,
kernel_size_,
strides,
paddings_,
data_format,
exclusive,
adaptive,
dx,
pool_process);
}
} break;
case 3: {
if (pooling_type == "max") {
funcs::MaxPool3dGradFunctor<Context, T> pool3d_backward;
pool3d_backward(ctx,
x,
out,
dout,
kernel_size_,
strides,
paddings_,
data_format,
dx);
} else if (pooling_type == "avg") {
funcs::Pool3dGradFunctor<Context, funcs::AvgPoolGrad<T>, T>
pool3d_backward;
funcs::AvgPoolGrad<T> pool_process;
pool3d_backward(ctx,
x,
out,
dout,
kernel_size_,
strides,
paddings_,
data_format,
exclusive,
adaptive,
dx,
pool_process);
}
} break;
default: {
PADDLE_THROW(
errors::InvalidArgument("Pool op only supports 2D and 3D input."));
}
}
}
}
template <typename Context, typename T1, typename T2 = int>
void MaxPoolWithIndexGradRawKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& mask,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* dx) {
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
if (global_pooling) {
for (size_t i = 0; i < kernel_size_.size(); ++i) {
paddings_[i] = 0;
kernel_size_[i] = static_cast<int>(dx->dims()[i + 2]);
}
}
if (dx) {
ctx.template Alloc<T1>(dx);
funcs::set_constant(ctx, dx, 0);
switch (kernel_size_.size()) {
case 2: {
funcs::MaxPool2dWithIndexGradFunctor<Context, T1, T2> pool2d_backward;
pool2d_backward(
ctx, dout, mask, kernel_size_, strides, paddings_, adaptive, dx);
} break;
case 3: {
funcs::MaxPool3dWithIndexGradFunctor<Context, T1, T2> pool3d_backward;
pool3d_backward(
ctx, dout, mask, kernel_size_, strides, paddings_, adaptive, dx);
} break;
default: {
PADDLE_THROW(
errors::InvalidArgument("Pool op only supports 2D and 3D input."));
}
}
}
}
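// Illustration (standalone, not part of this patch): the mask written by the
// forward pass stores, per output element, the flattened offset of the winning
// input, so this backward pass is a scatter-add of dout into dx.
#include <cassert>
#include <vector>

void max_pool_with_index_grad(const std::vector<float>& dout,
                              const std::vector<int>& mask,
                              std::vector<float>* dx) {
  for (size_t i = 0; i < dout.size(); ++i) {
    (*dx)[mask[i]] += dout[i];  // route the gradient to the argmax position
  }
}

int main() {
  // One 2x2 plane pooled to a single output; the argmax sat at offset 3.
  const std::vector<float> dout = {0.5f};
  const std::vector<int> mask = {3};
  std::vector<float> dx(4, 0.0f);
  max_pool_with_index_grad(dout, mask, &dx);
  assert(dx[0] == 0.0f && dx[3] == 0.5f);
  return 0;
}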
template <typename T, typename Context>
void Pool2dGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
PoolGradRawKernel<T, Context>(ctx,
x,
out,
dout,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
dx);
}
template <typename T, typename Context>
void Pool2dDoubleGradKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
if (pooling_type == "max") {
PADDLE_THROW(
errors::InvalidArgument("Pool op grad grad only supports avgpool."));
} else {
Pool2dKernel<T, Context>(ctx,
x,
kernel_size,
strides,
paddings,
ceil_mode,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
out);
}
}
template <typename T, typename Context>
void MaxPool2dWithIndexGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& mask,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* dx) {
MaxPoolWithIndexGradRawKernel<Context, T>(ctx,
x,
mask,
dout,
kernel_size,
strides,
paddings,
global_pooling,
adaptive,
dx);
}
template <typename T, typename Context>
void Pool3dGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx) {
PoolGradRawKernel<T, Context>(ctx,
x,
out,
dout,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
dx);
}
template <typename T, typename Context>
void MaxPool3dWithIndexGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& mask,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* dx) {
MaxPoolWithIndexGradRawKernel<Context, T>(ctx,
x,
mask,
dout,
kernel_size,
strides,
paddings,
global_pooling,
adaptive,
dx);
}
} // namespace phi
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/phi/kernels/pool_kernel.h"
#include <algorithm>
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/funcs/pooling.h"
#if defined(__HIPCC__) || defined(__NVCC__)
#include "paddle/phi/kernels/funcs/reduce_function.h"
#include "paddle/phi/kernels/primitive/functor_primitives.h"
#endif
namespace phi {
inline int GetReduceNum(const DenseTensor& input,
const DenseTensor* output,
const std::string data_format,
std::vector<int>* reduce_dim) {
// this reduce fast path only applies when data_format is NCHW
bool channel_last = (data_format == "NHWC");
if (channel_last) {
return 0;
}
int reduce_num = 0;
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
if ((output_height == 1) && (output_width == 1)) {
reduce_dim->push_back(2);
reduce_dim->push_back(3);
reduce_num = input.dims()[2] * input.dims()[3];
}
return reduce_num;
}
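// Illustration (standalone, not part of this patch): when adaptive average
// pooling yields a 1x1 output, the GPU branch below replaces the pooling loop
// with a mean reduction over H * W elements (reduce_num), which is equivalent:
#include <cassert>
#include <vector>

float global_avg_pool(const std::vector<float>& plane) {
  float sum = 0.0f;
  for (float v : plane) sum += v;
  return sum / static_cast<float>(plane.size());  // divide by reduce_num
}

int main() {
  const std::vector<float> plane = {1.0f, 2.0f, 3.0f, 6.0f};  // 2x2 map
  assert(global_avg_pool(plane) == 3.0f);
  return 0;
}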
template <typename T, typename Context>
void PoolRawKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
// update paddings
auto x_dims = x.dims();
DDim data_dims;
if (channel_last) {
data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
} else {
data_dims = slice_ddim(x_dims, 2, x_dims.size());
}
funcs::UpdatePadding(&paddings_,
global_pooling,
adaptive,
padding_algorithm,
data_dims,
strides,
kernel_size_);
if (data_dims.size() * 2 == static_cast<int>(paddings_.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings_.erase(paddings_.begin() + i + 1);
}
}
if (global_pooling) {
funcs::UpdateKernelSize(&kernel_size_, data_dims);
}
switch (kernel_size_.size()) {
case 2: {
if (pooling_type == "max") {
funcs::Pool2dFunctor<Context, funcs::MaxPool<T>, T> pool2d_forward;
funcs::MaxPool<T> pool_process;
pool2d_forward(ctx,
x,
kernel_size_,
strides,
paddings_,
data_format,
true,
false,
out,
pool_process);
} else if (pooling_type == "avg") {
std::vector<int> reduce_dim;
int reduce_num = GetReduceNum(x, out, data_format, &reduce_dim);
if (reduce_num > 0 &&
adaptive) { // for adaptive_avg_pool2d && output_size == 1
#if defined(__HIPCC__) || defined(__NVCC__)
auto stream = ctx.stream();
funcs::ReduceKernel<T, T, kps::AddFunctor, kps::DivideFunctor<T>>(
ctx, x, out, kps::DivideFunctor<T>(reduce_num), reduce_dim);
#else // for cpu
funcs::Pool2dFunctor<Context, funcs::AvgPool<T>, T> pool2d_forward;
funcs::AvgPool<T> pool_process;
pool2d_forward(ctx,
x,
kernel_size_,
strides,
paddings_,
data_format,
exclusive,
adaptive,
out,
pool_process);
#endif
} else { // avgpool_2d or adaptive_avg_pool2d && output_size != 1
funcs::Pool2dFunctor<Context, funcs::AvgPool<T>, T> pool2d_forward;
funcs::AvgPool<T> pool_process;
pool2d_forward(ctx,
x,
kernel_size_,
strides,
paddings_,
data_format,
exclusive,
adaptive,
out,
pool_process);
}
}
} break;
case 3: {
if (pooling_type == "max") {
funcs::Pool3dFunctor<Context, funcs::MaxPool<T>, T> pool3d_forward;
funcs::MaxPool<T> pool_process;
pool3d_forward(ctx,
x,
kernel_size_,
strides,
paddings_,
data_format,
true,
false,
out,
pool_process);
} else if (pooling_type == "avg") {
funcs::Pool3dFunctor<Context, funcs::AvgPool<T>, T> pool3d_forward;
funcs::AvgPool<T> pool_process;
pool3d_forward(ctx,
x,
kernel_size_,
strides,
paddings_,
data_format,
exclusive,
adaptive,
out,
pool_process);
}
} break;
default: {
PADDLE_THROW(
errors::InvalidArgument("Pool op only supports 2D and 3D input."));
}
}
}
template <typename Context, typename T1, typename T2 = int>
void MaxPoolWithIndexRawKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* out,
DenseTensor* mask) {
std::vector<int> paddings_ = paddings;
std::vector<int> kernel_size_ = kernel_size;
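// Global pooling spans the full spatial extent: zero the paddings and set
// the kernel size to the input's spatial dimensions.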
if (global_pooling) {
for (size_t i = 0; i < kernel_size_.size(); ++i) {
paddings_[i] = 0;
kernel_size_[i] = static_cast<int>(x.dims()[i + 2]);
}
}
switch (kernel_size_.size()) {
case 2: {
funcs::MaxPool2dWithIndexFunctor<Context, T1, T2> pool2d_forward;
pool2d_forward(
ctx, x, kernel_size_, strides, paddings_, adaptive, out, mask);
} break;
case 3: {
funcs::MaxPool3dWithIndexFunctor<Context, T1, T2> pool3d_forward;
pool3d_forward(
ctx, x, kernel_size_, strides, paddings_, adaptive, out, mask);
} break;
default: {
PADDLE_THROW(
errors::InvalidArgument("Pool op only supports 2D and 3D input."));
}
}
}
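// The public kernels below are thin wrappers over the Raw kernels; ceil_mode
// only affects the inferred output shape, so it is not forwarded here.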
template <typename T, typename Context>
void Pool2dKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
PoolRawKernel<T, Context>(ctx,
x,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
out);
}
template <typename T, typename Context>
void MaxPool2dWithIndexKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* out,
DenseTensor* mask) {
MaxPoolWithIndexRawKernel<Context, T>(ctx,
x,
kernel_size,
strides,
paddings,
global_pooling,
adaptive,
out,
mask);
}
template <typename T, typename Context>
void Pool3dKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out) {
PoolRawKernel<T, Context>(ctx,
x,
kernel_size,
strides,
paddings,
exclusive,
data_format,
pooling_type,
global_pooling,
adaptive,
padding_algorithm,
out);
}
template <typename T, typename Context>
void MaxPool3dWithIndexKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* out,
DenseTensor* mask) {
MaxPoolWithIndexRawKernel<Context, T>(ctx,
x,
kernel_size,
strides,
paddings,
global_pooling,
adaptive,
out,
mask);
}
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
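// Declarations of the pool backward kernels; the definitions live in the
// corresponding CPU and GPU kernel source files.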
template <typename T, typename Context>
void Pool2dGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx);
template <typename T, typename Context>
void Pool2dGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx);
template <typename T, typename Context>
void Pool2dDoubleGradKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out);
template <typename T, typename Context>
void Pool2dDoubleGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out);
template <typename T, typename Context>
void MaxPool2dWithIndexGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& mask,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* dx);
template <typename T, typename Context>
void Pool3dGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx);
template <typename T, typename Context>
void Pool3dGradGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* dx);
template <typename T, typename Context>
void MaxPool3dWithIndexGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& mask,
const DenseTensor& dout,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* dx);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
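// Declarations of the pool forward kernels exposed by phi, mirroring the
// backward declarations in pool_grad_kernel.h.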
template <typename T, typename Context>
void Pool2dKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out);
template <typename T, typename Context>
void Pool2dGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out);
template <typename T, typename Context>
void MaxPool2dWithIndexKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* out,
DenseTensor* mask);
template <typename T, typename Context>
void Pool3dKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out);
template <typename T, typename Context>
void Pool3dGPUDNNKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
const std::string& padding_algorithm,
DenseTensor* out);
template <typename T, typename Context>
void MaxPool3dWithIndexKernel(const Context& ctx,
const DenseTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool global_pooling,
bool adaptive,
DenseTensor* out,
DenseTensor* mask);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
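// Argument mapping functions translate the fluid operators' input, attribute
// and output names into the phi kernel signatures registered below.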
KernelSignature Pool2dOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("pool2d",
{"X"},
{"ksize",
"strides",
"paddings",
"ceil_mode",
"exclusive",
"data_format",
"pooling_type",
"global_pooling",
"adaptive",
"padding_algorithm"},
{"Out"});
}
KernelSignature Pool2dGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("pool2d_grad",
{"X", "Out", GradVarName("Out")},
{"ksize",
"strides",
"paddings",
"ceil_mode",
"exclusive",
"data_format",
"pooling_type",
"global_pooling",
"adaptive",
"padding_algorithm"},
{GradVarName("X")});
}
KernelSignature Pool2dDoubleGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("pool2d_double_grad",
{"X"},
{"ksize",
"strides",
"paddings",
"ceil_mode",
"exclusive",
"data_format",
"pooling_type",
"global_pooling",
"adaptive",
"padding_algorithm"},
{"Out"});
}
KernelSignature MaxPool2dWithIndexOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"max_pool2d_with_index",
{"X"},
{"ksize", "strides", "paddings", "global_pooling", "adaptive"},
{"Out", "Mask"});
}
KernelSignature MaxPool2dWithIndexGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"max_pool2d_with_index_grad",
{"X", "Mask", GradVarName("Out")},
{"ksize", "strides", "paddings", "global_pooling", "adaptive"},
{GradVarName("X")});
}
KernelSignature Pool3dOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("pool3d",
{"X"},
{"ksize",
"strides",
"paddings",
"ceil_mode",
"exclusive",
"data_format",
"pooling_type",
"global_pooling",
"adaptive",
"padding_algorithm"},
{"Out"});
}
KernelSignature Pool3dGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("pool3d_grad",
{"X", "Out", GradVarName("Out")},
{"ksize",
"strides",
"paddings",
"ceil_mode",
"exclusive",
"data_format",
"pooling_type",
"global_pooling",
"adaptive",
"padding_algorithm"},
{GradVarName("X")});
}
KernelSignature MaxPool3dWithIndexOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"max_pool3d_with_index",
{"X"},
{"ksize", "strides", "paddings", "global_pooling", "adaptive"},
{"Out", "Mask"});
}
KernelSignature MaxPool3dWithIndexGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"max_pool3d_with_index_grad",
{"X", "Mask", GradVarName("Out")},
{"ksize", "strides", "paddings", "global_pooling", "adaptive"},
{GradVarName("X")});
}
} // namespace phi
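// Register the mappings so the framework can route the legacy pool OPs to
// the phi kernels.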
PD_REGISTER_ARG_MAPPING_FN(pool2d, phi::Pool2dOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool2d_grad, phi::Pool2dGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool2d_double_grad,
phi::Pool2dDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(max_pool2d_with_index,
phi::MaxPool2dWithIndexOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(max_pool2d_with_index_grad,
phi::MaxPool2dWithIndexGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool3d, phi::Pool3dOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pool3d_grad, phi::Pool3dGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(max_pool3d_with_index,
phi::MaxPool3dWithIndexOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(max_pool3d_with_index_grad,
phi::MaxPool3dWithIndexGradOpArgumentMapping);
...@@ -52,7 +52,7 @@ TEST(MetaFnFactory, InferMetaFnExists) { ...@@ -52,7 +52,7 @@ TEST(MetaFnFactory, InferMetaFnExists) {
phi::InferMetaContext ctx; phi::InferMetaContext ctx;
ctx.EmplaceBackInput(shared_meat_x); ctx.EmplaceBackInput(shared_meat_x);
ctx.EmplaceBackOutput(shared_meta_out); ctx.EmplaceBackOutput(shared_meta_out);
ctx.SetMetaConfig(/*is_runtime=*/true); ctx.SetMetaConfig({/*is_runtime =*/true, /*is_run_mkldnn_kernel=*/false});
phi::MetaFnFactory::Instance().Get("sign")(&ctx); phi::MetaFnFactory::Instance().Get("sign")(&ctx);
EXPECT_EQ(dense_out1.dims().size(), dense_out2.dims().size()); EXPECT_EQ(dense_out1.dims().size(), dense_out2.dims().size());
...@@ -78,7 +78,7 @@ TEST(MetaFnFactory, CopyInferMetaFn) { ...@@ -78,7 +78,7 @@ TEST(MetaFnFactory, CopyInferMetaFn) {
ctx.EmplaceBackAttr(Backend::CPU); ctx.EmplaceBackAttr(Backend::CPU);
ctx.EmplaceBackAttr(false); ctx.EmplaceBackAttr(false);
ctx.EmplaceBackOutput(shared_meta_out); ctx.EmplaceBackOutput(shared_meta_out);
ctx.SetMetaConfig(/*is_runtime=*/true); ctx.SetMetaConfig({/*is_runtime =*/true, /*is_run_mkldnn_kernel=*/false});
phi::MetaFnFactory::Instance().Get("copy_to")(&ctx); phi::MetaFnFactory::Instance().Get("copy_to")(&ctx);
EXPECT_EQ(dense_out1.dims().size(), dense_out2.dims().size()); EXPECT_EQ(dense_out1.dims().size(), dense_out2.dims().size());
...@@ -105,7 +105,7 @@ TEST(MetaFnFactory, SplitInferMetaFn) { ...@@ -105,7 +105,7 @@ TEST(MetaFnFactory, SplitInferMetaFn) {
ctx.EmplaceBackAttr(num_or_sections); ctx.EmplaceBackAttr(num_or_sections);
ctx.EmplaceBackAttr(axis); ctx.EmplaceBackAttr(axis);
ctx.EmplaceBackOutputs(out); ctx.EmplaceBackOutputs(out);
ctx.SetMetaConfig(/*is_runtime=*/true); ctx.SetMetaConfig({/*is_runtime =*/true, /*is_run_mkldnn_kernel=*/false});
phi::MetaFnFactory::Instance().Get("split")(&ctx); phi::MetaFnFactory::Instance().Get("split")(&ctx);
ASSERT_EQ(dense_out1.dims().size(), 2); ASSERT_EQ(dense_out1.dims().size(), 2);
......