Commit 1a02369a, authored by hedaoyuan

Add releaseOutput interface for releasing the output memory of the middle layers.

Parent a096c58e
@@ -233,6 +233,13 @@ public:
    (void)numProcessed;
  }

  /**
   * @brief Release the middle layers' output memory.
   *
   * @note This function is used for memory optimization in inference.
   */
  virtual void releaseOutput() {}

 protected:
  virtual void onLoadParameter() {}
......
@@ -187,6 +187,31 @@ void NeuralNetwork::init(const ModelConfig& config,
    CHECK(it != layerMap_.end());
    outputLayers_.push_back(it->second);
  }

  for (const auto& layer : layers_) {
    const auto& name = layer->getName();
    bool isMiddleLayer = true;

    // skip data layers
    for (const auto& dataLayer : dataLayers_) {
      if (name == dataLayer->getName()) {
        isMiddleLayer = false;
        break;
      }
    }

    // skip output layers
    for (const auto& outputLayer : outputLayers_) {
      if (name == outputLayer->getName()) {
        isMiddleLayer = false;
        break;
      }
    }

    if (isMiddleLayer) {
      middleLayers_.push_back(layer);
    }
  }
}

void NeuralNetwork::connect(LayerPtr agentLayer,
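The loop added to NeuralNetwork::init above is a name-based set difference: a layer is recorded as a middle layer only if its name matches neither a data layer nor an output layer. The same selection as a standalone sketch over plain name lists (findMiddleLayers and the vector-of-names representation are illustrative only, not part of this commit):

#include <algorithm>
#include <string>
#include <vector>

// A layer is "middle" iff its name is in neither the data-layer nor the
// output-layer name list, mirroring the two inner loops above.
std::vector<std::string> findMiddleLayers(
    const std::vector<std::string>& allLayers,
    const std::vector<std::string>& dataLayers,
    const std::vector<std::string>& outputLayers) {
  std::vector<std::string> middle;
  for (const auto& name : allLayers) {
    auto contains = [&name](const std::vector<std::string>& v) {
      return std::find(v.begin(), v.end(), name) != v.end();
    };
    if (!contains(dataLayers) && !contains(outputLayers)) {
      middle.push_back(name);
    }
  }
  return middle;
}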
@@ -327,6 +352,13 @@ void NeuralNetwork::onPassEnd() {
  }
}

void NeuralNetwork::releaseOutput() {
  for (auto& layer : middleLayers_) {
    Argument& arg = layer->getOutput();
    arg.value.reset();
  }
}

#ifndef PADDLE_MOBILE_INFERENCE
class CombinedEvaluator : public Evaluator {
......
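In releaseOutput() above, arg.value is Paddle's MatrixPtr, a std::shared_ptr<Matrix>, so reset() only drops this layer's reference; the underlying buffer is freed once no other holder remains. A runnable sketch of that ownership behavior, using a stand-in Matrix type rather than the real paddle::Matrix:

#include <cstdio>
#include <memory>

// Stand-in for paddle::Matrix; prints when its buffer is destroyed.
struct Matrix {
  ~Matrix() { std::puts("output matrix freed"); }
};
using MatrixPtr = std::shared_ptr<Matrix>;

int main() {
  MatrixPtr value = std::make_shared<Matrix>();
  MatrixPtr alias = value;  // e.g. a downstream layer sharing the output
  value.reset();            // nothing freed yet: alias still holds a reference
  alias.reset();            // last owner gone -> "output matrix freed"
  return 0;
}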
@@ -137,6 +137,13 @@ public:
  /// some finish work, like convert the weight format of MKLDNNLayers
  void finish();

  /**
   * @brief Release the middle layers' output memory.
   *
   * @note This function is used for memory optimization in inference.
   */
  void releaseOutput();

 protected:
  /**
   * The constructor of NeuralNetwork.
@@ -158,6 +165,7 @@ protected:
  std::vector<DataLayerPtr> dataLayers_;
  std::vector<LayerPtr> outputLayers_;
  std::vector<LayerPtr> middleLayers_;

  static std::map<std::string, bool> dllInitMap;
......
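A sketch of how an inference-time caller might use the new interface. The forward(inArgs, &outArgs, passType) call follows GradientMachine's public API, but the header path, the network setup, and inferOneBatch itself are assumptions for illustration:

#include <vector>

#include "paddle/gserver/gradientmachines/NeuralNetwork.h"  // assumed path

// Hypothetical helper: run one inference batch, then free the middle
// layers' output matrices to lower peak memory before the next batch.
void inferOneBatch(paddle::NeuralNetwork& network,
                   const std::vector<paddle::Argument>& inArgs) {
  std::vector<paddle::Argument> outArgs;
  network.forward(inArgs, &outArgs, paddle::PASS_TEST);
  // ... consume outArgs; the output layers are not in middleLayers_, so
  // their values remain valid after the release below ...
  network.releaseOutput();
}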