diff --git a/paddle/function/Function.h b/paddle/function/Function.h index 9ad00c6f370cf64e9cc26f16e62c4d2ddb284003..15eb35b7f7dac1b98f2d8694707d83b84bda0f2e 100644 --- a/paddle/function/Function.h +++ b/paddle/function/Function.h @@ -38,7 +38,7 @@ public: if (err) { *err = Error(e.what()); } else { - LOG(FATAL) << "Cannot get key " << key << "with error " << e.what(); + LOG(FATAL) << "Cannot get key " << key << " with error " << e.what(); } return T(); } diff --git a/paddle/function/PadOpGpu.cu b/paddle/function/PadOpGpu.cu index 9104b1aca507c526858c2117e0a5db59f535091e..9094f1528433fdcaad3397a991aa8ac6fa04bc01 100644 --- a/paddle/function/PadOpGpu.cu +++ b/paddle/function/PadOpGpu.cu @@ -44,9 +44,9 @@ void Pad(real* outputs, size_t nth = num * inC * inH * inW; int blockSize = 1024; int gridSize = (nth + 1024 - 1) / 1024; - int cstart = pad.channelStart, cend = pad.channelEnd; - int hstart = pad.heightStart, hend = pad.heightEnd; - int wstart = pad.widthStart, wend = pad.widthEnd; + int cstart = pad.channel[0], cend = pad.channel[1]; + int hstart = pad.height[0], hend = pad.height[1]; + int wstart = pad.width[0], wend = pad.width[1]; int outC = inC + cstart + cend; int outH = inH + hstart + hend; int outW = inW + wstart + wend; @@ -83,9 +83,9 @@ void PadGrad(real* inGrad, int nth = num * inC * inH * inW; int blockSize = 1024; int gridSize = (nth + 1024 - 1) / 1024; - int cstart = pad.channelStart, cend = pad.channelEnd; - int hstart = pad.heightStart, hend = pad.heightEnd; - int wstart = pad.widthStart, wend = pad.widthEnd; + int cstart = pad.channel[0], cend = pad.channel[1]; + int hstart = pad.height[0], hend = pad.height[1]; + int wstart = pad.width[0], wend = pad.width[1]; int outC = inC + cstart + cend; int outH = inH + hstart + hend; int outW = inW + wstart + wend; diff --git a/paddle/function/PadOpTest.cpp b/paddle/function/PadOpTest.cpp index cd22d9113567912f7694e05e5d631e49d940e3ac..f77ac2a8c49c83f2d6c64c2a30b6a2f2eb09ac10 100644 --- 
a/paddle/function/PadOpTest.cpp +++ b/paddle/function/PadOpTest.cpp @@ -24,48 +24,22 @@ TEST(Pad, real) { for (size_t imgSizeW : {5, 32, 96}) { VLOG(3) << " numSamples=" << numSamples << " channels=" << channels << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW; - - FunctionCompare compare("Pad", - FuncConfig() - .set("cstart", 2) - .set("cend", 3) - .set("hstart", 1) - .set("hend", 2) - .set("wstart", 3) - .set("wend", 2)); - TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; - TensorShape outDims{ - numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5}; - compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, inDims)); - compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT, outDims, ASSIGN_TO)); - compare.run(); - } - } - } - } -} - -TEST(PadGrad, real) { - for (size_t numSamples : {5, 32}) { - for (size_t channels : {1, 5, 32}) { - for (size_t imgSizeH : {5, 33, 100}) { - for (size_t imgSizeW : {5, 32, 96}) { - VLOG(3) << " numSamples=" << numSamples << " channels=" << channels - << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW; - FunctionCompare compare("PadGrad", - FuncConfig() - .set("cstart", 2) - .set("cend", 3) - .set("hstart", 1) - .set("hend", 2) - .set("wstart", 3) - .set("wend", 2)); - TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; - TensorShape outDims{ - numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5}; - compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, outDims)); - compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT, inDims, ASSIGN_TO)); - compare.run(); + for (bool test_grad : {false, true}) { + FunctionCompare compare( + test_grad ? "PadGrad" : "Pad", + FuncConfig() + .set<std::vector<uint32_t>>("channel", {2, 3}) + .set<std::vector<uint32_t>>("height", {1, 2}) + .set<std::vector<uint32_t>>("width", {3, 2})); + TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; + TensorShape outDims{ + numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5}; + compare.addInputs( + BufferArg(VALUE_TYPE_FLOAT, test_grad ? 
outDims : inDims)); + compare.addOutputs(BufferArg( + VALUE_TYPE_FLOAT, test_grad ? inDims : outDims, ASSIGN_TO)); + compare.run(); + } } } }