diff --git a/paddle/operators/math/unpooling.cc b/paddle/operators/math/unpooling.cc
index b13d0104de0e0ea65a7609b7cbaf6822ffb33fd6..71928314bae13c7093b90e17e8f37bd99958f80e 100644
--- a/paddle/operators/math/unpooling.cc
+++ b/paddle/operators/math/unpooling.cc
@@ -13,17 +13,15 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/operators/math/unpooling.h"
-
 namespace paddle {
 namespace operators {
 namespace math {
-// All tensors are in NCHW format
 template <typename T>
 class Unpool2dMaxFunctor<platform::CPUPlace, T> {
-public:
-  void operator()(const platform::DeviceContext& context,
-                  const framework::Tensor& input,
-                  const framework::Tensor& indices, framework::Tensor* output) {
+ public:
+  void operator()(
+      const platform::DeviceContext& context, const framework::Tensor& input,
+      const framework::Tensor& indices, framework::Tensor* output) {
     const int batch_size = input.dims()[0];
     const int input_height = input.dims()[2];
     const int input_width = input.dims()[3];
@@ -51,13 +49,11 @@ public:
 };
 template <typename T>
 class Unpool2dMaxGradFunctor<platform::CPUPlace, T> {
-public:
-  void operator()(const platform::DeviceContext& context,
-                  const framework::Tensor& input,
-                  const framework::Tensor& indices,
-                  const framework::Tensor& output,
-                  const framework::Tensor& output_grad,
-                  framework::Tensor* input_grad) {
+ public:
+  void operator()(
+      const platform::DeviceContext& context, const framework::Tensor& input,
+      const framework::Tensor& indices, const framework::Tensor& output,
+      const framework::Tensor& output_grad, framework::Tensor* input_grad) {
     const int batch_size = input.dims()[0];
     const int input_height = input.dims()[2];
     const int input_width = input.dims()[3];
diff --git a/paddle/operators/math/unpooling.cu b/paddle/operators/math/unpooling.cu
index 601792087356df1a198e805ad5ad9ab31133eba0..4c6cb7bbca6d039e5075202b41109bc2ade33466 100644
--- a/paddle/operators/math/unpooling.cu
+++ b/paddle/operators/math/unpooling.cu
@@ -19,14 +19,10 @@ namespace paddle {
 namespace operators {
 namespace math {
 template <typename T>
-__global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
-                                  const int* indices_data,
-                                  const int input_height,
-                                  const int input_width,
-                                  const int channels,
-                                  T* output_data,
-                                  const int output_height,
-                                  const int output_width) {
+__global__ void KernelUnpool2dMax(
+    const int nthreads, const T* input_data, const int* indices_data,
+    const int input_height, const int input_width, const int channels,
+    T* output_data, const int output_height, const int output_width) {
   int in_n_stride = input_height * input_width * channels;
   int in_c_stride = input_height * input_width;
   int out_n_stride = output_height * output_width * channels;
@@ -44,16 +40,11 @@ __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
   }
 }
 template <typename T>
-__global__ void KernelUnpool2dMaxGrad(const int nthreads, const T* input_data,
-                                      const int* indices_data,
-                                      const int input_height,
-                                      const int input_width,
-                                      const int channels,
-                                      const T* output_data,
-                                      const T* output_grad,
-                                      const int output_height,
-                                      const int output_width,
-                                      T* input_grad) {
+__global__ void KernelUnpool2dMaxGrad(
+    const int nthreads, const T* input_data, const int* indices_data,
+    const int input_height, const int input_width, const int channels,
+    const T* output_data, const T* output_grad, const int output_height,
+    const int output_width, T* input_grad) {
   int in_n_stride = input_height * input_width * channels;
   int in_c_stride = input_height * input_width;
   int out_n_stride = output_height * output_width * channels;
@@ -75,11 +66,10 @@ __global__ void KernelUnpool2dMaxGrad(const int nthreads, const T* input_data,
 */
 template <typename T>
 class Unpool2dMaxFunctor<platform::GPUPlace, T> {
-public:
-  void operator()(const platform::DeviceContext& context,
-                  const framework::Tensor& input,
-                  const framework::Tensor& indices,
-                  framework::Tensor* output) {
+ public:
+  void operator()(
+      const platform::DeviceContext& context, const framework::Tensor& input,
+      const framework::Tensor& indices, framework::Tensor* output) {
     const int batch_size = input.dims()[0];
     const int input_height = input.dims()[2];
     const int input_width = input.dims()[3];
@@ -91,12 +81,11 @@ public:
     T* output_data = output->mutable_data<T>(context.GetPlace());
     int threads = 1024;
     int grid = (input.numel() + threads - 1) / threads;
-    KernelUnpool2dMax<
-        T><<<grid, threads, 0,
-             reinterpret_cast<const platform::CUDADeviceContext&>(context)
-                 .stream()>>>(input.numel(), input_data, indices_data,
-                              input_height, input_width, output_channels,
-                              output_data, output_height, output_width);
+    KernelUnpool2dMax<T><<<grid, threads, 0,
+                           reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                               .stream()>>>(input.numel(), input_data, indices_data,
+                                            input_height, input_width, output_channels,
+                                            output_data, output_height, output_width);
   }
 };
 /*
@@ -104,7 +93,7 @@ public:
 */
 template <typename T>
 class Unpool2dMaxGradFunctor<platform::GPUPlace, T> {
-public:
+ public:
   void operator()(const platform::DeviceContext& context,
                   const framework::Tensor& input,
                   const framework::Tensor& indices,
@@ -124,13 +113,11 @@ public:
     T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
     int threads = 1024;
     int grid = (input.numel() + threads - 1) / threads;
-    KernelUnpool2dMaxGrad<
-        T><<<grid, threads, 0,
-             reinterpret_cast<const platform::CUDADeviceContext&>(context)
-                 .stream()>>>(input.numel(), input_data, indices_data,
-                              input_height, input_width, output_channels,
-                              output_data, output_grad_data,
-                              output_height, output_width, input_grad_data);
+    KernelUnpool2dMaxGrad<T><<<grid, threads, 0,
+                               reinterpret_cast<const platform::CUDADeviceContext&>(context)
+        .stream()>>>(input.numel(), input_data, indices_data,
+                     input_height, input_width, output_channels, output_data,
+                     output_grad_data, output_height, output_width, input_grad_data);
   }
 };
 template class Unpool2dMaxGradFunctor<platform::GPUPlace, float>;
diff --git a/paddle/operators/math/unpooling.h b/paddle/operators/math/unpooling.h
index 0b969d8d8292a65292d47484d9b24002aa5442a8..43e32bf4fb58874806c1979791fe0f65a5eb7dfe 100644
--- a/paddle/operators/math/unpooling.h
+++ b/paddle/operators/math/unpooling.h
@@ -18,25 +18,20 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 namespace math {
-
 template <typename Place, typename T>
-
 class Unpool2dMaxFunctor {
-public:
-  void operator()(const platform::DeviceContext& context,
-                  const framework::Tensor& input,
-                  const framework::Tensor& indices, framework::Tensor* output);
+ public:
+  void operator()(
+      const platform::DeviceContext& context, const framework::Tensor& input,
+      const framework::Tensor& indices, framework::Tensor* output);
 };
-
 template <typename Place, typename T>
 class Unpool2dMaxGradFunctor {
-public:
-  void operator()(const platform::DeviceContext& context,
-                  const framework::Tensor& input,
-                  const framework::Tensor& indices,
-                  const framework::Tensor& output,
-                  const framework::Tensor& output_grad,
-                  framework::Tensor* input_grad);
+ public:
+  void operator()(
+      const platform::DeviceContext& context, const framework::Tensor& input,
+      const framework::Tensor& indices, const framework::Tensor& output,
+      const framework::Tensor& output_grad, framework::Tensor* input_grad);
 };
 } // namespace math
 } // namespace operators
diff --git a/paddle/operators/unpool_op.cc b/paddle/operators/unpool_op.cc
index cabf17401b009a5d9b72621263ff9081c67cae20..a51df3aa4255f9e58a60d1f0e451627ee125fa13 100644
--- a/paddle/operators/unpool_op.cc
+++ b/paddle/operators/unpool_op.cc
@@ -31,13 +31,12 @@ class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker {
         "(Tensor) The input tensor of the indices given out by MaxPool2d. "
         "The format of input tensor is NCHW. Where N is batch size, C is the "
         "number of channels, H and W is the height and width of feature.");
-    AddOutput(
-        "Out",
-        "(Tensor) The output tensor of unpool operator."
-        "The format of output tensor is also NCHW."
-        "Where N is batch size, C is "
-        "the number of channels, H and W is the height and "
-        "width of feature.");
+    AddOutput("Out",
+              "(Tensor) The output tensor of unpool operator."
+              "The format of output tensor is also NCHW."
+              "Where N is batch size, C is "
+              "the number of channels, H and W is the height and "
+              "width of feature.");
     AddAttr<std::vector<int>>(
         "ksize",
         "(vector), the unpooling window size(height, width) "
@@ -138,7 +137,7 @@ namespace ops = paddle::operators;
 REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad,
             ops::UnpoolOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    unpool,ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
+    unpool, ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
     ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
 REGISTER_OP_CPU_KERNEL(
     unpool_grad, ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
diff --git a/paddle/operators/unpool_op.cu.cc b/paddle/operators/unpool_op.cu.cc
index d8214fc68774ed9663f815c0e5f9200a03349710..8ee9e2b373b5270d96bf2def9e63760be650d603 100644
--- a/paddle/operators/unpool_op.cu.cc
+++ b/paddle/operators/unpool_op.cu.cc
@@ -15,11 +15,9 @@ limitations under the License. */
 #include "paddle/operators/unpool_op.h"
 
 namespace ops = paddle::operators;
-REGISTER_OP_GPU_KERNEL(unpool,
-                       ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
-                       ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
-REGISTER_OP_GPU_KERNEL(unpool_grad,
-                       ops::UnpoolGradKernel<paddle::platform::GPUPlace,
-                                             float>,
-                       ops::UnpoolGradKernel<paddle::platform::GPUPlace,
-                                             double>);
+REGISTER_OP_GPU_KERNEL(
+    unpool, ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
+    ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
+REGISTER_OP_GPU_KERNEL(
+    unpool_grad, ops::UnpoolGradKernel<paddle::platform::GPUPlace, float>,
+    ops::UnpoolGradKernel<paddle::platform::GPUPlace, double>);
diff --git a/paddle/operators/unpool_op.h b/paddle/operators/unpool_op.h
index 8fad768e494f744860b48c24e26026fe07a3a167..243eb7e532c5149db4fb1b381fd8664ae4bdd81a 100644
--- a/paddle/operators/unpool_op.h
+++ b/paddle/operators/unpool_op.h
@@ -20,7 +20,6 @@ limitations under the License. */
 
 namespace paddle {
 namespace operators {
-
 template <typename Place, typename T>
 class UnpoolKernel : public framework::OpKernel<T> {
  public:
@@ -41,7 +40,6 @@ class UnpoolKernel : public framework::OpKernel<T> {
     unpool2d_max_forward(context.device_context(), *in_x, *in_y, out);
   }
 };
-
 template <typename Place, typename T>
 class UnpoolGradKernel : public framework::OpKernel<T> {
  public:
@@ -69,6 +67,5 @@ class UnpoolGradKernel : public framework::OpKernel<T> {
                      *out_grad, in_x_grad);
   }
 };
-
 } // namespace operators
 } // namespace paddle