From 84552872a337b42252233023191698f992aa5808 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Fri, 17 Feb 2017 16:09:50 +0800
Subject: [PATCH] getLayerOutput in CPU

---
 paddle/gserver/gradientmachines/GradientMachine.h      |  4 +++-
 .../gserver/gradientmachines/MultiGradientMachine.cpp  | 10 +++-------
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h
index 201b65bc4..a814e771d 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.h
+++ b/paddle/gserver/gradientmachines/GradientMachine.h
@@ -134,7 +134,9 @@ public:
     backward(callback);
   }
 
-  virtual MatrixPtr getLayerOutput(const std::string& layerName) = 0;
+  virtual MatrixPtr getLayerOutput(const std::string& layerName) {
+    return nullptr;
+  }
 
   // see comment in Layer.h for the function with the same name
   virtual void resetState() {}
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index db13a8868..7e6092037 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -283,7 +283,7 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
 }
 
 MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
-  // each thread has the same neuro network
+  // each thread has the same neural network
   auto nn = threads_[0]->getGradientMachine();
   size_t height = 0;
   size_t width = nn->getLayerOutput(layerName)->getWidth();
@@ -297,21 +297,17 @@ MatrixPtr MultiGradientMachine::getLayerOutput(const std::string& layerName) {
   }
 
   MatrixPtr layerOutput;
-  Matrix::resizeOrCreate(layerOutput, height, width, false, useGpu_);
+  Matrix::resizeOrCreate(layerOutput, height, width, false, false);
 
   // copy one layer output from one trainer thread at each time
   size_t startRow = 0;
 
   for (size_t i = 0; i < threads_.size(); i++) {
     auto tmpMatrix = layerOutput->subMatrix(startRow, mats[i]->getHeight());
-    tmpMatrix->copyFrom(*mats[i], HPPL_STREAM_DEFAULT);
+    tmpMatrix->copyFrom(*mats[i]);
     startRow += mats[i]->getHeight();
   }
 
-  if (useGpu_) {
-    hl_stream_synchronize(HPPL_STREAM_DEFAULT);
-  }
-
   return layerOutput;
 }
 
-- 
GitLab
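
A minimal caller-side sketch (not part of the patch) of what the change means in practice: MultiGradientMachine::getLayerOutput() now always gathers the per-thread outputs into a CPU matrix (resizeOrCreate with useGpu = false), so the result can be read directly without any stream synchronization, and the base-class default returns nullptr instead of being pure virtual. The helper name inspectLayer and the use of Matrix::getData() are illustrative assumptions, not part of this patch.

    #include <string>
    #include "paddle/gserver/gradientmachines/GradientMachine.h"

    // Hypothetical helper: read the first element of a layer's output.
    void inspectLayer(paddle::GradientMachine& gm, const std::string& name) {
      // Rows from all trainer threads are concatenated; after this patch
      // the matrix is host-resident, so reading it here is safe.
      paddle::MatrixPtr out = gm.getLayerOutput(name);
      if (!out) return;  // base-class default now returns nullptr
      paddle::real first = out->getData()[0];  // direct CPU read, no device copy
      (void)first;
    }

Presumably the point of forcing a CPU result is that the per-thread outputs may live on different GPU devices, so gathering into host memory sidesteps cross-device copies and the HPPL_STREAM_DEFAULT synchronization the old code needed.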