Commit 90f664d0 authored by sweetsky0901

test unpool ok cpu

Parent 822f2834
......@@ -80,13 +80,6 @@ function(op_library TARGET)
file(APPEND ${pybind_file} "USE_OP(pool2d);\n")
endif()
# unpool_op contains several operators
if ("${TARGET}" STREQUAL "unpool_op")
set(pybind_flag 1)
# It's enough to just adding one operator to pybind
file(APPEND ${pybind_file} "USE_OP(unpool2d);\n")
endif()
# pool_cudnn_op contains several operators
if ("${TARGET}" STREQUAL "pool_cudnn_op")
set(pybind_flag 1)
......
......@@ -32,13 +32,13 @@ class Unpool2dMaxFunctor<platform::CPUPlace, T> {
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
int input_feasize = input_height * input_width;
int output_feasize = output_height * output_width;
const T* input_data = input.data<T>();
const int * indices_data = indices.data<int>();
const T * indices_data = indices.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
memset(output_data, 0, \
sizeof(T) * output_feasize * output_channels * batch_size);
for (int b = 0; b < batch_size; ++b) {
for (int c = 0; c < output_channels; ++c) {
for (int i = 0; i < input_feasize; ++i) {
......@@ -74,9 +74,8 @@ public:
int input_feasize = input_height * input_width;
int output_feasize = output_height * output_width;
const int* indices_data = indices.data<int>();
const T* indices_data = indices.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
for (int b = 0; b < batch_size; ++b) {
......
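For reference, the forward scatter that Unpool2dMaxFunctor performs can be sketched in NumPy. This is a minimal illustration only, not the committed kernel; the function name and the out_h/out_w parameters are assumptions for the sketch:

import numpy as np

def unpool2d_max_forward(input, indices, out_h, out_w):
    # input, indices: (N, C, h_in, w_in); each index is a flat offset
    # into the corresponding (out_h * out_w) output feature map, the
    # same per-channel flat indexing the CPU functor uses above.
    n, c, h_in, w_in = input.shape
    out = np.zeros((n, c, out_h, out_w), dtype=input.dtype)
    flat_out = out.reshape(n, c, out_h * out_w)   # view over `out`
    flat_in = input.reshape(n, c, h_in * w_in)
    flat_idx = indices.reshape(n, c, h_in * w_in).astype(np.int64)
    for b in range(n):
        for ch in range(c):
            flat_out[b, ch, flat_idx[b, ch]] = flat_in[b, ch]
    return out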
......@@ -76,7 +76,7 @@ class Unpool2dMaxFunctor<platform::GPUPlace, T> {
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
const T* indices_data = indices.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = output->numel();
......@@ -111,7 +111,7 @@ class Unpool2dMaxGradFunctor<platform::GPUPlace, T> {
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
const T* indices_data = indices.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
......
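The backward functor is the inverse gather: every input-gradient element reads the output gradient at its recorded index. A NumPy sketch of the same gather (again illustrative, with assumed names, not the CUDA kernel):

import numpy as np

def unpool2d_max_backward(indices, output_grad):
    # indices: (N, C, h_in, w_in) flat offsets into each output map;
    # output_grad: (N, C, out_h, out_w). Max-unpooling is a pure
    # scatter, so its gradient is the matching gather.
    n, c, h_in, w_in = indices.shape
    out_h, out_w = output_grad.shape[2], output_grad.shape[3]
    flat_grad = output_grad.reshape(n, c, out_h * out_w)
    flat_idx = indices.reshape(n, c, h_in * w_in).astype(np.int64)
    input_grad = np.empty((n, c, h_in * w_in), dtype=output_grad.dtype)
    for b in range(n):
        for ch in range(c):
            input_grad[b, ch] = flat_grad[b, ch, flat_idx[b, ch]]
    return input_grad.reshape(n, c, h_in, w_in)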
......@@ -48,7 +48,7 @@ class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker {
"(vector defalut:{0,0}), "
"paddings(height, width) of unpooling operator.")
.SetDefault({0, 0});
AddAttr<std::string>("unpoolingType",
AddAttr<std::string>("unpoolingtype",
"(string), unpooling type, can be \"max\" for max-unpooling ")
.InEnum({"max"});
AddComment(R"DOC(
......@@ -80,8 +80,8 @@ class UnpoolOp : public framework::OperatorWithKernel {
auto in_x_dims = ctx->GetInputDim("X");
auto in_y_dims = ctx->GetInputDim("Y");
std::string unpooling_type = \
ctx->Attrs().Get<std::string>("unpooling_type");
std::string unpoolingtype = \
ctx->Attrs().Get<std::string>("unpoolingtype");
std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
......@@ -108,9 +108,9 @@ class UnpoolOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) must not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
// PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) must not be null.");
// PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
// "Input(Out@GRAD) should not be null");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Input(X@GRAD) should not be null.");
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
......@@ -120,13 +120,12 @@ class UnpoolOpGrad : public framework::OperatorWithKernel {
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(unpool2d, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool2d_grad,
REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad,
ops::UnpoolOpGrad);
REGISTER_OP_CPU_KERNEL(unpool2d,
REGISTER_OP_CPU_KERNEL(unpool,
ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(unpool2d_grad,
ops::UnpoolGradKernel<paddle::platform::CPUPlace,
float>,
ops::UnpoolGradKernel<paddle::platform::CPUPlace,
double>);
REGISTER_OP_CPU_KERNEL(unpool_grad,
ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);
......@@ -15,10 +15,10 @@
#include "paddle/operators/unpool_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(unpool2d,
REGISTER_OP_GPU_KERNEL(unpool,
ops::UnpoolKernel<paddle::platform::GPUPlace, float>,
ops::UnpoolKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(unpool2d_grad,
REGISTER_OP_GPU_KERNEL(unpool_grad,
ops::UnpoolGradKernel<paddle::platform::GPUPlace,
float>,
ops::UnpoolGradKernel<paddle::platform::GPUPlace,
......
......@@ -30,13 +30,13 @@ class UnpoolKernel : public framework::OpKernel<T> {
const Tensor* in_x = context.Input<Tensor>("X");
const Tensor* in_y = context.Input<Tensor>("Y");
Tensor* out = context.Output<Tensor>("Out");
std::string pooling_type = context.Attr<std::string>("unpooling_type");
std::string unpoolingtype = context.Attr<std::string>("unpoolingtype");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
switch (ksize.size()) {
case 2: {
if (pooling_type == "max") {
if (unpoolingtype == "max") {
math::Unpool2dMaxFunctor<Place, T> unpool2d_max_forward;
unpool2d_max_forward(context.device_context(), *in_x, *in_y, out);
}
......@@ -56,7 +56,7 @@ class UnpoolGradKernel : public framework::OpKernel<T> {
const Tensor* out_grad =
context.Input<Tensor>(framework::GradVarName("Out"));
Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
std::string pooling_type = context.Attr<std::string>("unpooling_type");
std::string unpoolingtype = context.Attr<std::string>("unpoolingtype");
std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
std::vector<int> strides = context.Attr<std::vector<int>>("strides");
std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
......@@ -69,7 +69,7 @@ class UnpoolGradKernel : public framework::OpKernel<T> {
}
switch (ksize.size()) {
case 2: {
if (pooling_type == "max") {
if (unpoolingtype == "max") {
math::Unpool2dMaxGradFunctor<Place, T> unpool2d_max_backward;
unpool2d_max_backward(context.device_context(), *in_x, *in_y, in_x_grad,
*out, *out_grad);
......
import unittest
import numpy as np
from op_test import OpTest
def maxout_forward_naive(input, groups):
s0, s1, s2, s3 = input.shape
return np.ndarray([s0, s1 / groups, groups, s2, s3], \
buffer = input, dtype=input.dtype).max(axis=(2))
class TestUnpool2dOp(OpTest):
def setUp(self):
self.op_type = "unpool2d"
self.init_test_case()
input = np.random.random(self.shape).astype("float32")
output = self.MaxOut_forward_naive(input, self.groups).astype("float32")
self.inputs = {'X': input}
self.attrs = {
'strides': self.strides,
'paddings': self.paddings,
'ksize': self.ksize,
'unpooling_type': self.pool_type,
}
self.outputs = {'Out': output.astype('float32')}
def init_pool_type(self):
self.pool_type = "max"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
def init_test_case(self):
self.MaxOut_forward_naive = maxout_forward_naive
self.shape = [100, 6, 2, 2]
self.groups=2
if __name__ == '__main__':
unittest.main()
......
import unittest
import numpy as np
from op_test import OpTest
def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings):
s0, s1, s2, s3 = input.shape
out_H=(s2 - 1) * strides[0] - 2 * paddings[0] + ksize[0]
out_W=(s2 - 1) * strides[1] - 2 * paddings[1] + ksize[1]
out = np.zeros((s0, s1, out_H, out_W))
for nidx in xrange(s0):
for cidx in xrange(s1):
for h in xrange(s2):
for w in xrange(s3):
index = indices[nidx, cidx, h, w]
hidx = (index - index % out_W) / out_W
widx = index % out_W
out[nidx, cidx, int(hidx), int(widx)] = input[nidx, cidx, h, w]
return out
class TestUnpoolOp(OpTest):
def setUp(self):
self.op_type = "unpool"
self.init_test_case()
pre_input = np.random.random(self.shape).astype("float32")
N, C, H, W = pre_input.shape
H_out = (H - self.ksize[0] + 2 * self.paddings[0]) / self.strides[0] + 1
W_out = (W - self.ksize[1] + 2 * self.paddings[1]) / self.strides[1] + 1
input = np.zeros((N, C, H_out, W_out))
indices = np.zeros((N, C, H_out, W_out))
for i in xrange(H_out):
for j in xrange(W_out):
r_start = np.max((i * self.strides[0] - self.paddings[0], 0))
r_end = np.min((i * self.strides[0] + self.ksize[0] - self.paddings[0], H))
c_start = np.max((j * self.strides[1] - self.paddings[1], 0))
c_end = np.min((j * self.strides[1] + self.ksize[1] - self.paddings[1], W))
for nidx in xrange(N):
for cidx in xrange(C):
x_masked = pre_input[nidx, cidx, r_start:r_end, c_start:c_end]
input[nidx, cidx, i, j] = x_masked.max()
arg = x_masked.argmax()
indices[nidx, cidx, i, j] = (r_start + arg / self.ksize[1]) * W + c_start + arg % self.ksize[1]
output = self.Unpool2d_forward_naive(input, indices, self.ksize, self.strides, self.paddings).astype("float32")
self.inputs = {'X': input.astype('float32'),
'Y': indices.astype('int16')}
self.attrs = {
'strides': self.strides,
'paddings': self.paddings,
'ksize': self.ksize,
'unpoolingtype': self.unpoolingtype,
}
self.outputs = {'Out': output.astype('float32')}
def test_check_output(self):
print self.outputs['Out']
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', max_relative_error=0.5)
def init_test_case(self):
self.Unpool2d_forward_naive = unpool2dmax_forward_naive
self.unpoolingtype = "max"
self.shape = [10, 2, 5, 5]
self.ksize = [3, 3]
self.strides = [2, 2]
self.paddings = [0, 0]
if __name__ == '__main__':
unittest.main()
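The two sketches above pair up the way this test expects: scattering and then gathering with the same indices is the identity on the pooled values. A standalone sanity check (Python 3, reusing the assumed sketch functions above; independent of the Python 2 test file):

import numpy as np

pooled = np.arange(1.0, 5.0).reshape(1, 1, 2, 2)   # (N, C, 2, 2) pooled values
indices = np.array([[[[0, 6], [10, 16]]]])         # flat offsets into a 5x5 map
out = unpool2d_max_forward(pooled, indices, 5, 5)
assert out.reshape(-1)[[0, 6, 10, 16]].tolist() == [1.0, 2.0, 3.0, 4.0]

# Each input element owns exactly one output position, so gathering a
# gradient of ones recovers ones everywhere.
grad = unpool2d_max_backward(indices, np.ones_like(out))
assert (grad == 1.0).all()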