Unverified commit 480a5446, authored by qingqing01 and committed by GitHub

Merge pull request #6659 from qingqing01/mobile_mem

Reduce memory usage in conv layer and RoI layer for mobile inference.
@@ -126,6 +126,11 @@ public:
       inputData += inputChannels * inputHeight * inputWidth;
       outputData += outputChannels * outputHeight * outputWidth;
     }
+#ifdef PADDLE_MOBILE_INFERENCE
+    if (Device == DEVICE_TYPE_CPU) {
+      delete memory_;
+    }
+#endif
   }
 };
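The hunk above frees the conv layer's scratch buffer (memory_) as soon as the CPU forward pass finishes, but only in builds flagged for mobile inference: with no backward pass coming, nothing will revisit the buffer, so holding it only inflates peak memory. Below is a minimal standalone sketch of the same pattern; ScratchBuffer and convForward are hypothetical stand-ins, not the PaddlePaddle code itself, while PADDLE_MOBILE_INFERENCE is the real build flag.

#include <cstddef>
#include <vector>

// Hypothetical scratch-buffer type standing in for the layer's memory_.
struct ScratchBuffer {
  std::vector<float> data;  // holds the im2col expansion of the input
  void resize(std::size_t n) { data.resize(n); }
  // Swapping with an empty vector actually returns the capacity to the
  // allocator, mirroring the `delete memory_` in the hunk above.
  void release() { std::vector<float>().swap(data); }
};

// Hypothetical forward pass: expand, multiply, then drop the scratch space.
void convForward(ScratchBuffer& scratch, std::size_t im2colSize) {
  scratch.resize(im2colSize);
  // ... im2col expansion and GEMM into the output would go here ...
#ifdef PADDLE_MOBILE_INFERENCE
  // Inference-only build: no backward pass will reuse the buffer,
  // so release it immediately to keep peak memory low.
  scratch.release();
#endif
}

Releasing inside the forward pass (rather than in the destructor) is what lowers the peak: the buffer is gone before the next layer allocates its own working memory.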
@@ -84,12 +84,15 @@ void ROIPoolLayer::forward(PassType passType) {
   size_t poolChannelOffset = pooledHeight_ * pooledWidth_;
   real* outputData = outputValue->getData();
-  Matrix::resizeOrCreate(maxIdxs_,
-                         numROIs,
-                         channels_ * pooledHeight_ * pooledWidth_,
-                         false,
-                         false);
-  real* argmaxData = maxIdxs_->getData();
+  real* argmaxData = nullptr;
+  if (passType != PASS_TEST) {
+    Matrix::resizeOrCreate(maxIdxs_,
+                           numROIs,
+                           channels_ * pooledHeight_ * pooledWidth_,
+                           false,
+                           false);
+    argmaxData = maxIdxs_->getData();
+  }
   for (size_t n = 0; n < numROIs; ++n) {
     // the first five elements of each RoI should be:
@@ -128,14 +131,18 @@ void ROIPoolLayer::forward(PassType passType) {
           bool isEmpty = (hend <= hstart) || (wend <= wstart);
           size_t poolIndex = ph * pooledWidth_ + pw;
           outputData[poolIndex] = isEmpty ? 0 : -FLT_MAX;
-          argmaxData[poolIndex] = -1;
+          if (argmaxData) {
+            argmaxData[poolIndex] = -1;
+          }
           for (size_t h = hstart; h < hend; ++h) {
             for (size_t w = wstart; w < wend; ++w) {
               size_t index = h * width_ + w;
               if (batchData[index] > outputData[poolIndex]) {
                 outputData[poolIndex] = batchData[index];
-                argmaxData[poolIndex] = index;
+                if (argmaxData) {
+                  argmaxData[poolIndex] = index;
+                }
               }
             }
           }
@@ -143,7 +150,9 @@ void ROIPoolLayer::forward(PassType passType) {
       }
       batchData += channelOffset;
       outputData += poolChannelOffset;
-      argmaxData += poolChannelOffset;
+      if (argmaxData) {
+        argmaxData += poolChannelOffset;
+      }
     }
     bottomROIs += roiOffset;
   }