diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h
index a814e771d12e3ae71212fd62c06a4f56338286ae..5469c0d89f49648743f3a7e851694e4e1e736fb2 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.h
+++ b/paddle/gserver/gradientmachines/GradientMachine.h
@@ -134,7 +134,7 @@ public:
     backward(callback);
   }
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName) {
+  virtual MatrixPtr getLayerOutput(const std::string& layerName) const {
     return nullptr;
   }
 
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index 7e60920376d195c708ee5aafff25924aaabe17be..2d42e648302f1673ae067ded43db4502679a6625 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -282,7 +282,8 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
   backwardImp(callback);
 }
 
-MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
+MatrixPtr MultiGradientMachine::getLayerOutput(
+    const std::string& layerName) const {
   // each thread has the same neural network
   auto nn = threads_[0]->getGradientMachine();
   size_t height = 0;
@@ -301,11 +302,10 @@ MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
 
   // copy one layer output from one trainer thread at each time
   size_t startRow = 0;
-
-  for (size_t i = 0; i < threads_.size(); i++) {
-    auto tmpMatrix = layerOutput->subMatrix(startRow, mats[i]->getHeight());
-    tmpMatrix->copyFrom(*mats[i]);
-    startRow += mats[i]->getHeight();
+  for (auto& mat : mats) {
+    auto tmpMatrix = layerOutput->subMatrix(startRow, mat->getHeight());
+    tmpMatrix->copyFrom(*mat);
+    startRow += mat->getHeight();
   }
 
   return layerOutput;
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/gserver/gradientmachines/MultiGradientMachine.h
index 988d5098179806fc75aa2fae5dcc4330d7963257..a1a2d417062de5a1adeb83506b2a30339a816a13 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.h
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.h
@@ -189,7 +189,7 @@ public:
                                PassType passType,
                                const UpdateCallback& callback);
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName);
+  virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
 
   virtual void onPassEnd();
 
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index 1f9ace4f67fdcab6af522277fa83bb0e6044360d..00887c81d47687f3f3f976c2050289a3c73885e9 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -293,7 +293,7 @@ void NeuralNetwork::backward(const UpdateCallback& callback) {
   }
 }
 
-MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) {
+MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) const {
   auto it = layerMap_.find(layerName);
   CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName;
   return it->second->getOutputValue();
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h
index bf9ed09327f2f13585a2b37993f3139fe6cb862b..6ecc251a409cb9daefce704dd89027b9bd03744c 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.h
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.h
@@ -87,7 +87,7 @@ public:
 
   virtual void backward(const UpdateCallback& callback = nullptr);
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName);
+  virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
 
   const LayerPtr& getLayer(const std::string& layerName) const {
     auto it = layerMap_.find(layerName);
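
Note (not part of the patch): the effect of the const qualifier added above is that read-only code can query layer outputs through a const reference to the gradient machine. A minimal standalone C++ sketch of that pattern follows; the GradientMachineLike/NetworkLike/dumpLayer names are made up for illustration and are not the real Paddle API.

// Illustrative sketch only: mimics the const-qualified virtual getter from the patch.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

using MatrixPtr = std::shared_ptr<std::vector<float>>;  // stand-in for paddle::MatrixPtr

class GradientMachineLike {
public:
  virtual ~GradientMachineLike() = default;
  // const-qualified, mirroring the change in this patch
  virtual MatrixPtr getLayerOutput(const std::string& layerName) const {
    return nullptr;
  }
};

class NetworkLike : public GradientMachineLike {
public:
  MatrixPtr getLayerOutput(const std::string& layerName) const override {
    (void)layerName;  // a real network would look the layer up by name
    return std::make_shared<std::vector<float>>(4, 0.5f);
  }
};

// Inspection code can take a const reference; without the const qualifier
// on getLayerOutput, this call would not compile.
void dumpLayer(const GradientMachineLike& gm, const std::string& name) {
  MatrixPtr out = gm.getLayerOutput(name);
  std::cout << name << ": " << (out ? out->size() : 0) << " values\n";
}

int main() {
  NetworkLike net;
  dumpLayer(net, "fc_layer");
  return 0;
}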