提交 84552872 编写于 作者: L liaogang

getLayerOutput: gather layer output into a CPU matrix (allocate the merged result with useGpu=false, copy each thread's slice synchronously, and drop the now-unneeded GPU stream synchronization)

上级 3842bc4d
......@@ -134,7 +134,9 @@ public:
backward(callback);
}
virtual MatrixPtr getLayerOutput(const std::string& layerName) = 0;
// Default implementation of the layer-output lookup hook.
// Returns the output activation matrix of the layer named `layerName`.
// The base class does not track per-layer outputs, so it returns nullptr;
// subclasses that do (e.g. MultiGradientMachine below) override this.
// NOTE(review): callers presumably must null-check the result — confirm
// at the call sites, which are outside this view.
virtual MatrixPtr getLayerOutput(const std::string& layerName) {
return nullptr;
}
// see comment in Layer.h for the function with the same name
virtual void resetState() {}
......
......@@ -283,7 +283,7 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
}
MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
// each thread has the same neuro network
// each thread has the same neural network
auto nn = threads_[0]->getGradientMachine();
size_t height = 0;
size_t width = nn->getLayerOutput(layerName)->getWidth();
......@@ -297,21 +297,17 @@ MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
}
MatrixPtr layerOutput;
Matrix::resizeOrCreate(layerOutput, height, width, false, useGpu_);
Matrix::resizeOrCreate(layerOutput, height, width, false, false);
// copy one layer output from one trainer thread at each time
size_t startRow = 0;
for (size_t i = 0; i < threads_.size(); i++) {
auto tmpMatrix = layerOutput->subMatrix(startRow, mats[i]->getHeight());
tmpMatrix->copyFrom(*mats[i], HPPL_STREAM_DEFAULT);
tmpMatrix->copyFrom(*mats[i]);
startRow += mats[i]->getHeight();
}
if (useGpu_) {
hl_stream_synchronize(HPPL_STREAM_DEFAULT);
}
return layerOutput;
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册