Commit d3ac3393 authored by gongweibao

fix bugs: remove leftover debug printf calls, make BlockExpandGradOp::InferShape set the shape of X@GRAD (not Out@GRAD), switch the backward kernel from Im2Col to Col2Im, and update the Python unit test accordingly

Parent 32db8db5
......@@ -23,7 +23,6 @@ class BlockExpandOp : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
printf("op infershape\n");
using namespace framework;
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input of BlockExpandOp should not be null.");
......@@ -34,7 +33,6 @@ class BlockExpandOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(in_dim.size(), 4, "Input format must be NCHW.");
PADDLE_ENFORCE_GE(in_dim[0], 1, "Input batchsize must >= 1.");
printf("op infershape2\n");
int block_height = ctx->Attrs().Get<int>("blockHeight");
int block_width = ctx->Attrs().Get<int>("blockWidth");
int stride_height = ctx->Attrs().Get<int>("strideHeight");
......@@ -60,8 +58,6 @@ class BlockExpandOp : public framework::OperatorWithKernel {
// reshape into [seqLength, stepSize], where seqLength is equal
// output_height * output_width, stepSize is equal
// input_channels * blockHeight * blockWidth
printf("N:%d, o_h:%d o_w:%d C:%d b_h:%d b_w:%d\n", N, output_height,
output_width, C, block_height, block_width);
ctx->SetOutputDim(
"Out", {N, output_height, output_width, C, block_height, block_width});
......@@ -81,7 +77,6 @@ class BlockExpandOpMaker : public framework::OpProtoAndCheckerMaker {
H: height
W: width
)DOC");
printf("opmakeer\n");
AddOutput("Out", "(LodTensor)The output data of block_expand op,");
AddAttr<int>("blockHeight", "(int)height of block.");
AddAttr<int>("blockWidth", "(int)width of block.");
......@@ -117,14 +112,9 @@ class BlockExpandGradOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext* ctx) const override {
using namespace framework;
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output of BlockExpandOp op should not be null.");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
-    auto in_dim = ctx->GetInputDim("X");
-    ctx->SetOutputDim(GradVarName("Out"), in_dim);
+                   "Input(Out@GRAD) shouldn't be null.");
+    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
};
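
Note: the change above fixes a genuine shape-inference bug. The old code set the dim of Out@GRAD, which is an input of the gradient op, from X's dims; the gradient op must instead produce X@GRAD with the same shape as X, which is what the new SetOutputDim call does.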
......
......@@ -68,11 +68,7 @@ class BlockExpandKernel : public framework::OpKernel<T> {
img_height, img_width, block_height, block_width, stride_height,
stride_width, padding_height, padding_width, outputHeight, outputWidth);
printf("N:%d, o_h:%d o_w:%d C:%d b_h:%d b_w:%d\n", N, outputHeight,
outputWidth, C, block_height, block_width);
for (int i = 0; i < N; i++) {
printf("i:%d\n", i);
Tensor src = in->Slice<T>(i, i + 1).Resize({C, img_height, img_width});
Tensor dst = out->Slice<T>(i, i + 1).Resize(
{outputHeight, outputWidth, C, block_height, block_width});
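
Note: for reference, the forward kernel's per-sample im2col in the kOCF layout can be sketched in numpy roughly as follows (single channel shown; names are illustrative, not the op's API):

    import numpy as np

    def im2col(img, block, stride, padding):
        # img: [H, W] -> cols: [out_h, out_w, block, block]
        H, W = img.shape
        padded = np.pad(img, padding, mode='constant')
        out_h = (H + 2 * padding - block) // stride + 1
        out_w = (W + 2 * padding - block) // stride + 1
        cols = np.empty((out_h, out_w, block, block), dtype=img.dtype)
        for i in range(out_h):
            for j in range(out_w):
                cols[i, j] = padded[i * stride:i * stride + block,
                                    j * stride:j * stride + block]
        return cols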
......@@ -89,9 +85,12 @@ class BlockExpandGradKernel : public framework::OpKernel<T> {
void Compute(const framework::ExecutionContext& ctx) const override {
using namespace framework;
auto* in = ctx.Input<Tensor>("X");
auto* out = ctx.Input<Tensor>("Out");
auto* out_grad = ctx.Output<Tensor>(GradVarName("Out"));
out_grad->mutable_data<T>(ctx.GetPlace());
auto* d_out = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* d_x = ctx.Output<Tensor>(GradVarName("X"));
d_x->mutable_data<T>(ctx.GetPlace());
auto x_v = framework::EigenVector<T>::Flatten(*d_x);
x_v.device(ctx.GetEigenDevice<Place>()) = x_v.constant(0.0);
auto in_dim = in->dims();
int N = in_dim[0];
......@@ -113,16 +112,12 @@ class BlockExpandGradKernel : public framework::OpKernel<T> {
img_height, img_width, block_height, block_width, stride_height,
stride_width, padding_height, padding_width, outputHeight, outputWidth);
printf("N:%d, o_h:%d o_w:%d C:%d b_h:%d b_w:%d\n", N, outputHeight,
outputWidth, C, block_height, block_width);
for (int i = 0; i < N; i++) {
-      Tensor dst =
-          out_grad->Slice<T>(i, i + 1).Resize({C, img_height, img_width});
-      Tensor src = out->Slice<T>(i, i + 1).Resize(
+      Tensor dst = d_x->Slice<T>(i, i + 1).Resize({C, img_height, img_width});
+      Tensor src = d_out->Slice<T>(i, i + 1).Resize(
           {outputHeight, outputWidth, C, block_height, block_width});
-      math::Im2ColFunctor<math::ColFormat::kOCF, Place, T> f;
-      f(ctx.device_context(), src, dst, stride_height, stride_width,
+      math::Col2ImFunctor<math::ColFormat::kOCF, Place, T> f;
+      f(ctx.device_context(), dst, src, stride_height, stride_width,
padding_height, padding_width);
}
}
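
Note: the backward kernel is the transpose of the forward im2col. Col2Im scatters each block's gradient back into the overlapping image positions it was read from, accumulating where blocks overlap; that is why d_x is zero-filled before the loop. A rough single-channel numpy sketch of that accumulation (names illustrative):

    import numpy as np

    def col2im_accumulate(cols, H, W, block, stride, padding):
        # cols: [out_h, out_w, block, block] gradients of extracted blocks
        d_img = np.zeros((H + 2 * padding, W + 2 * padding), dtype=cols.dtype)
        out_h, out_w = cols.shape[:2]
        for i in range(out_h):
            for j in range(out_w):
                # overlapping blocks accumulate, hence += on a zeroed buffer
                d_img[i * stride:i * stride + block,
                      j * stride:j * stride + block] += cols[i, j]
        return d_img[padding:padding + H, padding:padding + W]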
......
......@@ -113,16 +113,30 @@ def col2img(attrs, col, img):
col[col_row_idx][col_col_idx][channel][filter_row_idx][filter_col_idx]
-class TestBlockExpandMulOp(OpTest):
+class TestBlockExpandOp(OpTest):
+    def get_input_data(self, C, H, W):
+        x = np.random.uniform(0.1, 1, [C, H, W]).astype("float32")
+        for c in range(0, C):
+            for h in range(0, H):
+                for w in range(0, W):
+                    #x[c][h][w] = c * H * W + h * W + w
+                    x[c][h][w] = 0.2 + 0.01 * (c * H * W + h * W + w)
+        return x
+
     def setUp(self):
-        x = np.random.uniform(0.1, 1, [3, 9, 9]).astype("float32")
+        C = 3
+        H = 4
+        W = 4
+        x = self.get_input_data(C, H, W)
+        #print x
attrs = {
-            'blockHeight': 3,
-            'blockWidth': 3,
-            'strideHeight': 2,
-            'strideWidth': 2,
-            'paddingHeight': 3,
-            'paddingWidth': 3,
+            'blockHeight': 2,
+            'blockWidth': 2,
+            'strideHeight': 1,
+            'strideWidth': 1,
+            'paddingHeight': 1,
+            'paddingWidth': 1,
}
outputHeight, outputWidth = get_output_shape(attrs, x)
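
Note: with the new test configuration (H = W = 4, block 2, stride 1, padding 1), the size formula gives outputHeight = outputWidth = (4 + 2*1 - 2)/1 + 1 = 5, so Out has shape [1, 5, 5, 3, 2, 2], assuming get_output_shape implements the standard im2col geometry sketched earlier.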
......@@ -131,7 +145,7 @@ class TestBlockExpandMulOp(OpTest):
attrs['blockHeight'], attrs['blockWidth']]).astype("float32")
self.op_type = "block_expand"
-        self.inputs = {'X': x.reshape(1, 3, 9, 9)}
+        self.inputs = {'X': x.reshape(1, C, H, W)}
self.attrs = attrs
im2col(attrs, x, out)
......@@ -139,16 +153,14 @@ class TestBlockExpandMulOp(OpTest):
'Out':out.reshape(1, outputHeight, outputWidth, x.shape[0], \
attrs['blockHeight'], attrs['blockWidth'])
}
-        #print out
-    """
     def test_check_output(self):
         self.check_output()
-        print 1
-    """

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
-    """
+        self.check_grad(['X'], 'Out', max_relative_error=0.01)
if __name__ == '__main__':
......
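Note: max_relative_error=0.01 loosens the gradient check's tolerance; a relaxed bound like this is a common accommodation for float32 ops whose backward pass accumulates over overlapping blocks.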