Commit 3206094b authored by S sweetsky0901

format code

Parent d2ee3c98
paddle/operators/math/unpooling.cc
@@ -20,8 +20,8 @@ template <typename T>
class Unpool2dMaxFunctor<platform::CPUPlace, T> {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& indices, framework::Tensor* output) {
    const int batch_size = input.dims()[0];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
...
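The scatter itself happens on the lines collapsed out of this hunk. As a rough guide to what Unpool2dMaxFunctor computes, here is a minimal standalone sketch, not the commit's code: it assumes NCHW layout, an output that starts zeroed, and indices that store each maximum's flattened offset within its own output feature map; all names are illustrative.

#include <cstring>

// Hypothetical sketch of 2-D max unpooling on the CPU: copy every pooled
// value back to the output position recorded by the preceding max-pool.
template <typename T>
void Unpool2dMaxSketch(const T* input, const int* indices, T* output,
                       int batch_size, int channels, int in_hw, int out_hw) {
  std::memset(output, 0, sizeof(T) * batch_size * channels * out_hw);
  for (int b = 0; b < batch_size; ++b) {
    for (int c = 0; c < channels; ++c) {
      for (int i = 0; i < in_hw; ++i) {
        output[indices[i]] = input[i];  // scatter the max back to its cell
      }
      input += in_hw;    // advance to the next feature map
      indices += in_hw;
      output += out_hw;
    }
  }
}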
paddle/operators/math/unpooling.cu
@@ -20,11 +20,12 @@ namespace operators {
namespace math {
template <typename T>
__global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
                                  const int* indices_data,
                                  const int input_height,
                                  const int input_width,
                                  const int channels, T* output_data,
                                  const int output_height,
                                  const int output_width) {
  int in_n_stride = input_height * input_width * channels;
  int in_c_stride = input_height * input_width;
  int out_n_stride = output_height * output_width * channels;
@@ -42,12 +43,11 @@ __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
  }
}
template <typename T>
__global__ void KernelUnpool2dMaxGrad(
    const int nthreads, const T* input_data, const int* indices_data,
    const int input_height, const int input_width, const int channels,
    const T* output_data, const T* output_grad, const int output_height,
    const int output_width, T* input_grad) {
  int in_n_stride = input_height * input_width * channels;
  int in_c_stride = input_height * input_width;
  int out_n_stride = output_height * output_width * channels;
@@ -71,8 +71,8 @@ template <typename T>
class Unpool2dMaxFunctor<platform::GPUPlace, T> {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& indices, framework::Tensor* output) {
    const int batch_size = input.dims()[0];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
...@@ -88,8 +88,8 @@ class Unpool2dMaxFunctor<platform::GPUPlace, T> { ...@@ -88,8 +88,8 @@ class Unpool2dMaxFunctor<platform::GPUPlace, T> {
T><<<grid, threads, 0, T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context) reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(input.numel(), input_data, indices_data, .stream()>>>(input.numel(), input_data, indices_data,
input_height, input_width, output_channels, input_height, input_width, output_channels,
output_data, output_height, output_width); output_data, output_height, output_width);
} }
}; };
/*
@@ -121,9 +121,9 @@ class Unpool2dMaxGradFunctor<platform::GPUPlace, T> {
        T><<<grid, threads, 0,
             reinterpret_cast<const platform::CUDADeviceContext&>(context)
                 .stream()>>>(input.numel(), input_data, indices_data,
                              input_height, input_width, output_channels,
                              output_data, output_grad_data, output_height,
                              output_width, input_grad_data);
  }
};
template class Unpool2dMaxGradFunctor<platform::GPUPlace, float>;
...
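Only the stride setup of these kernels is visible above; the bodies are collapsed in this view. A hedged CUDA sketch of the forward scatter those strides support follows, assuming, as in the CPU sketch, per-feature-map flattened indices and a zero-initialized output; the grid-stride loop and parameter list are illustrative, not the commit's exact code.

// Hypothetical sketch: one thread per pooled element, scattering it into
// the output using stride arithmetic like in_c_stride/out_c_stride above.
template <typename T>
__global__ void KernelUnpool2dMaxSketch(const int nthreads,
                                        const T* input_data,
                                        const int* indices_data,
                                        const int in_c_stride,
                                        const int out_c_stride,
                                        T* output_data) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads;
       i += blockDim.x * gridDim.x) {
    int slab = i / in_c_stride;  // combined (batch, channel) slab index
    output_data[slab * out_c_stride + indices_data[i]] = input_data[i];
  }
}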
paddle/operators/math/unpooling.h
@@ -23,8 +23,7 @@ class Unpool2dMaxFunctor {
 public:
  void operator()(const platform::DeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& indices, framework::Tensor* output);
};
template <typename Place, class T>
class Unpool2dMaxGradFunctor {
...
paddle/operators/unpool_op.cc
@@ -75,36 +75,38 @@ int OutputSize(int input_size, int ksize, int padding, int stride) {
class UnpoolOp : public framework::OperatorWithKernel {
 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.device_context());
  }

 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of UnpoolOp "
                   "should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Indices"),
                   "Input(Indices) of UnpoolOp "
                   "should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of UnpoolOp should not be null.");
    auto in_x_dims = ctx->GetInputDim("X");
    auto in_y_dims = ctx->GetInputDim("Indices");
    std::string unpooling_type =
        ctx->Attrs().Get<std::string>("unpooling_type");
    std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
    std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
    std::vector<int> paddings =
        ctx->Attrs().Get<std::vector<int>>("paddings");
    PADDLE_ENFORCE(in_x_dims.size() == 4,
                   "Unpooling input must be 4-dimensional.");
    PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims);
    std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
    for (size_t i = 0; i < ksize.size(); ++i) {
      output_shape.push_back(
          OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
    }
    ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
  }
@@ -113,30 +115,30 @@ class UnpoolOp : public framework::OperatorWithKernel {
class UnpoolOpGrad : public framework::OperatorWithKernel {
 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.device_context());
  }

 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
                   "Input(X@GRAD) should not be null.");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};
}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad,
            ops::UnpoolOpGrad);
REGISTER_OP_CPU_KERNEL(unpool,
                       ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
                       ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
    unpool_grad, ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
    ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);
...
paddle/operators/unpool_op.cu.cc
@@ -15,9 +15,9 @@ limitations under the License. */
#include "paddle/operators/unpool_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(unpool,
                       ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
                       ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
    unpool_grad, ops::UnpoolGradKernel<paddle::platform::GPUPlace, float>,
    ops::UnpoolGradKernel<paddle::platform::GPUPlace, double>);
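For reference, the OutputSize helper named in the unpool_op.cc hunk header computes the unpooled extent per spatial dimension. Its body is not shown in this diff, so the following is an assumption: a sketch of the usual inverse-pooling formula, with a worked example in the comment.

// Presumed formula (not shown in the diff): invert the pooling arithmetic.
// Example: input_size = 16, ksize = 2, padding = 0, stride = 2 -> 32.
int OutputSizeSketch(int input_size, int ksize, int padding, int stride) {
  return (input_size - 1) * stride - 2 * padding + ksize;
}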