提交 f846e8fe 编写于 作者: L liaogang

Add const for getLayerOutput

上级 84552872
...@@ -134,7 +134,7 @@ public: ...@@ -134,7 +134,7 @@ public:
backward(callback); backward(callback);
} }
/// Get the output matrix of the layer with the given name.
///
/// Base-class default implementation: this GradientMachine tracks no
/// layer outputs, so it always returns nullptr. Subclasses that do
/// (e.g. NeuralNetwork, MultiGradientMachine) override this.
///
/// @param layerName  name of the layer whose output value is requested
///                   (unused here; consumed by overrides)
/// @return MatrixPtr holding the layer's output, or nullptr in this
///         default implementation
/// NOTE: declared const (per commit "Add const for getLayerOutput") so
/// it can be called on a const GradientMachine; overrides must match.
virtual MatrixPtr getLayerOutput(const std::string& layerName) const {
  (void)layerName;  // intentionally unused in the default implementation
  return nullptr;
}
......
...@@ -282,7 +282,8 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs, ...@@ -282,7 +282,8 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
backwardImp(callback); backwardImp(callback);
} }
MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) { MatrixPtr MultiGradientMachine::getLayerOutput(
const std::string& layerName) const {
// each thread has the same neural network // each thread has the same neural network
auto nn = threads_[0]->getGradientMachine(); auto nn = threads_[0]->getGradientMachine();
size_t height = 0; size_t height = 0;
...@@ -301,11 +302,10 @@ MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) { ...@@ -301,11 +302,10 @@ MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
// copy one layer output from one trainer thread at each time // copy one layer output from one trainer thread at each time
size_t startRow = 0; size_t startRow = 0;
for (auto& mat : mats) {
for (size_t i = 0; i < threads_.size(); i++) { auto tmpMatrix = layerOutput->subMatrix(startRow, mat->getHeight());
auto tmpMatrix = layerOutput->subMatrix(startRow, mats[i]->getHeight()); tmpMatrix->copyFrom(*mat);
tmpMatrix->copyFrom(*mats[i]); startRow += mat->getHeight();
startRow += mats[i]->getHeight();
} }
return layerOutput; return layerOutput;
......
...@@ -189,7 +189,7 @@ public: ...@@ -189,7 +189,7 @@ public:
PassType passType, PassType passType,
const UpdateCallback& callback); const UpdateCallback& callback);
virtual MatrixPtr getLayerOutput(const std::string& layerName); virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
virtual void onPassEnd(); virtual void onPassEnd();
......
...@@ -293,7 +293,7 @@ void NeuralNetwork::backward(const UpdateCallback& callback) { ...@@ -293,7 +293,7 @@ void NeuralNetwork::backward(const UpdateCallback& callback) {
} }
} }
MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) { MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) const {
auto it = layerMap_.find(layerName); auto it = layerMap_.find(layerName);
CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName; CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName;
return it->second->getOutputValue(); return it->second->getOutputValue();
......
...@@ -87,7 +87,7 @@ public: ...@@ -87,7 +87,7 @@ public:
virtual void backward(const UpdateCallback& callback = nullptr); virtual void backward(const UpdateCallback& callback = nullptr);
virtual MatrixPtr getLayerOutput(const std::string& layerName); virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
const LayerPtr& getLayer(const std::string& layerName) const { const LayerPtr& getLayer(const std::string& layerName) const {
auto it = layerMap_.find(layerName); auto it = layerMap_.find(layerName);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册