From fbf864362dc1bd716a8db1f4441afe488fe3d74b Mon Sep 17 00:00:00 2001
From: liaogang
Date: Wed, 22 Feb 2017 16:02:58 +0800
Subject: [PATCH] Update python getLayerOutputs

---
 demo/image_classification/prediction.py       |  2 +-
 demo/model_zoo/resnet/classify.py             |  2 +-
 paddle/api/Arguments.cpp                      |  7 ++++
 paddle/api/GradientMachine.cpp                |  5 ++-
 paddle/api/PaddleAPI.h                        |  5 +--
 paddle/api/Trainer.cpp                        |  8 ++---
 .../gradientmachines/GradientMachine.h        |  4 +--
 .../gradientmachines/MultiGradientMachine.cpp | 32 +++++--------------
 .../gradientmachines/MultiGradientMachine.h   |  4 ++-
 .../gradientmachines/NeuralNetwork.cpp        |  6 ++--
 .../gserver/gradientmachines/NeuralNetwork.h  |  2 +-
 paddle/gserver/layers/CosSimLayer.cpp         |  2 +-
 paddle/py_paddle/util.py                      |  2 +-
 13 files changed, 35 insertions(+), 46 deletions(-)

diff --git a/demo/image_classification/prediction.py b/demo/image_classification/prediction.py
index 9a86aafcb2f..49c0ff600c4 100755
--- a/demo/image_classification/prediction.py
+++ b/demo/image_classification/prediction.py
@@ -126,7 +126,7 @@ class ImageClassifier():
         # For oversampling, average predictions across crops.
         # If not, the shape of output[name]: (1, class_number),
         # the mean is also applicable.
-        return output[output_layer].mean(0)
+        return output[output_layer]['value'].mean(0)

     def predict(self, image=None, output_layer=None):
         assert isinstance(image, basestring)
diff --git a/demo/model_zoo/resnet/classify.py b/demo/model_zoo/resnet/classify.py
index 4631816c43e..6074cc1d3a8 100755
--- a/demo/model_zoo/resnet/classify.py
+++ b/demo/model_zoo/resnet/classify.py
@@ -156,7 +156,7 @@ class ImageClassifier():
             # For oversampling, average predictions across crops.
             # If not, the shape of output[name]: (1, class_number),
             # the mean is also applicable.
-            res[name] = output[name].mean(0)
+            res[name] = output[name]['value'].mean(0)

         return res

diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp
index 41beed38a87..a3f4bfffc9f 100644
--- a/paddle/api/Arguments.cpp
+++ b/paddle/api/Arguments.cpp
@@ -38,6 +38,13 @@ Arguments* Arguments::createByPaddleArgumentVector(void* ptr) {
   return args;
 }

+Arguments* Arguments::createByPaddleArgument(const void* ptr) {
+  auto p = (paddle::Argument*)(ptr);
+  auto args = new Arguments();
+  args->m->outputs.push_back(*p);
+  return args;
+}
+
 Matrix* Arguments::getSlotValue(size_t idx) const throw(RangeError) {
   auto& a = m->getArg(idx);
   return Matrix::createByPaddleMatrixPtr(&a.value);
diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp
index a44763bfa53..a64e70a6bd5 100644
--- a/paddle/api/GradientMachine.cpp
+++ b/paddle/api/GradientMachine.cpp
@@ -144,12 +144,11 @@ Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) {

 void GradientMachine::randParameters() { m->machine->randParameters(); }

-Matrix* GradientMachine::getLayerOutput(const std::string& layerName) const
+Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const
     throw(UnsupportError) {
   auto nn = m->machine;
   if (nn) {
-    auto mat = nn->getLayerOutput(layerName);
-    return Matrix::createByPaddleMatrixPtr(&mat);
+    return Arguments::createByPaddleArgument(&nn->getLayerOutput(layerName));
   } else {
     throw UnsupportError();
   }
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index f5af8b0035b..10569a71708 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -454,6 +454,7 @@ public:

 private:
   static Arguments* createByPaddleArgumentVector(void* ptr);
+  static Arguments* createByPaddleArgument(const void* ptr);
   void* getInternalArgumentsPtr() const;

 private:
@@ -769,7 +770,7 @@ public:

   void randParameters();

-  Matrix* getLayerOutput(const std::string& layerName) const
+  Arguments* getLayerOutput(const std::string& layerName) const
       throw(UnsupportError);

   /**
@@ -952,7 +953,7 @@ public:

   Arguments* getForwardOutput();

-  Matrix* getLayerOutput(const std::string& layerName);
+  Arguments* getLayerOutput(const std::string& layerName);
 };

 /// the N-Best results generated from one input sequence.
diff --git a/paddle/api/Trainer.cpp b/paddle/api/Trainer.cpp
index d83dc380bee..c742614aff9 100644
--- a/paddle/api/Trainer.cpp
+++ b/paddle/api/Trainer.cpp
@@ -131,12 +131,10 @@ void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) {
 void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); }
 void Trainer::finishTestPeriod() { m->finishTestPeriod(); }

-Matrix* Trainer::getLayerOutput(const std::string& layerName) {
-  auto nn = std::dynamic_pointer_cast<paddle::NeuralNetwork>(
-      this->m->getGradientMachine());
+Arguments* Trainer::getLayerOutput(const std::string& layerName) {
+  auto nn = this->m->getGradientMachine();
   CHECK(nn) << "trainerInternal_.getGradientMachine() is not NeuralNetwork";
-  auto m = nn->getLayerOutput(layerName);
-  return Matrix::createByPaddleMatrixPtr(&m);
+  return Arguments::createByPaddleArgument(&nn->getLayerOutput(layerName));
 }

 void Trainer::forwardOneBatch(size_t batchSize) {
diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h
index 5469c0d89f4..ae39783c6b7 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.h
+++ b/paddle/gserver/gradientmachines/GradientMachine.h
@@ -134,8 +134,8 @@ public:
     backward(callback);
   }

-  virtual MatrixPtr getLayerOutput(const std::string& layerName) const {
-    return nullptr;
+  virtual const Argument& getLayerOutput(const std::string& layerName) {
+    return *((Argument*)nullptr);
   }

   // see comment in Layer.h for the function with the same name
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index 2d42e648302..6b11b0155e9 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -282,33 +282,17 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
   backwardImp(callback);
 }

-MatrixPtr MultiGradientMachine::getLayerOutput(
-    const std::string& layerName) const {
-  // each thread has the same neural network
-  auto nn = threads_[0]->getGradientMachine();
-  size_t height = 0;
-  size_t width = nn->getLayerOutput(layerName)->getWidth();
-  std::vector<MatrixPtr> mats;
-  mats.reserve(threads_.size());
-  for (auto& thread : threads_) {
-    MatrixPtr out = thread->getGradientMachine()->getLayerOutput(layerName);
-    mats.push_back(out);
-    height += out->getHeight();
-    CHECK_EQ(width, out->getWidth());
-  }
+const Argument& MultiGradientMachine::getLayerOutput(
+    const std::string& layerName) {
+  std::vector<Argument> args;
+  args.reserve(threads_.size());

-  MatrixPtr layerOutput;
-  Matrix::resizeOrCreate(layerOutput, height, width, false, false);
-
-  // copy one layer output from one trainer thread at each time
-  size_t startRow = 0;
-  for (auto& mat : mats) {
-    auto tmpMatrix = layerOutput->subMatrix(startRow, mat->getHeight());
-    tmpMatrix->copyFrom(*mat);
-    startRow += mat->getHeight();
+  for (auto& thread : threads_) {
+    args.push_back(thread->getGradientMachine()->getLayerOutput(layerName));
   }
+  outLayerArgs_.concat(args, false /* use_gpu */, outArgStream_, passType_);

-  return layerOutput;
+  return outLayerArgs_;
 }

 void MultiGradientMachine::backwardImp(const UpdateCallback& callback) {
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/gserver/gradientmachines/MultiGradientMachine.h
index a1a2d417062..9083230afd6 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.h
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.h
@@ -189,7 +189,7 @@ public:
                        PassType passType,
                        const UpdateCallback& callback);

-  virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
+  virtual const Argument& getLayerOutput(const std::string& layerName);

   virtual void onPassEnd();

@@ -316,6 +316,8 @@ protected:
   std::vector<Argument> outArgs_;
   hl_stream_t outArgStream_;

+  Argument outLayerArgs_;
+
   /// ParameterType which needs to be merged from each GPU
   std::vector<ParameterType> mergeTypes_;
   int numDevices_; /* number of gpu devices */
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index 00887c81d47..d1afde40e1f 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -293,10 +293,8 @@ void NeuralNetwork::backward(const UpdateCallback& callback) {
   }
 }

-MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) const {
-  auto it = layerMap_.find(layerName);
-  CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName;
-  return it->second->getOutputValue();
+const Argument& NeuralNetwork::getLayerOutput(const std::string& layerName) {
+  return getLayer(layerName)->getOutput();
 }

 void NeuralNetwork::onPassEnd() {
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h
index 6ecc251a409..b4dc38e31b6 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.h
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.h
@@ -87,7 +87,7 @@ public:

   virtual void backward(const UpdateCallback& callback = nullptr);

-  virtual MatrixPtr getLayerOutput(const std::string& layerName) const;
+  virtual const Argument& getLayerOutput(const std::string& layerName);

   const LayerPtr& getLayer(const std::string& layerName) const {
     auto it = layerMap_.find(layerName);
diff --git a/paddle/gserver/layers/CosSimLayer.cpp b/paddle/gserver/layers/CosSimLayer.cpp
index 1501c743701..57ba124e40c 100644
--- a/paddle/gserver/layers/CosSimLayer.cpp
+++ b/paddle/gserver/layers/CosSimLayer.cpp
@@ -68,7 +68,7 @@ void CosSimLayer::forward(PassType passType) {
 void CosSimLayer::backward(const UpdateCallback& callback) {
   /* activation */ {
     REGISTER_TIMER_INFO("CosBpAtvTimer", getName().c_str());
-    CHECK_EQ(backward_.size(), 1) << "Only one backward function needed";
+    CHECK_EQ(backward_.size(), 1UL) << "Only one backward function needed";

     const auto outG = this->getOutputGrad();
     const auto outV = this->getOutputValue();
diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py
index ce105d249aa..a708def1d2d 100644
--- a/paddle/py_paddle/util.py
+++ b/paddle/py_paddle/util.py
@@ -208,7 +208,7 @@ def __monkeypatch_gradient_machine__():

         output = dict()
         for name in layerNames:
-            output[name] = __matrix_to_numpy__(self.getLayerOutput(name))
+            output[name] = __arguments_to_numpy__(0, self.getLayerOutput(name))
         return output

     swig_paddle.GradientMachine.getLayerOutputs = getLayerOutputs
--
GitLab
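
Usage sketch (not part of the commit): the user-visible effect of this patch
is that GradientMachine.getLayerOutputs() in py_paddle now maps each layer
name to a dict of the layer's Argument fields, of which the demos read the
'value' entry, instead of mapping the name to a bare numpy matrix. A minimal
sketch follows, assuming py_paddle is importable, `machine` is an initialized
GradientMachine, and the network defines a layer named "fc_output" — both
`machine` and the layer name are hypothetical, not names from this commit:

    # `machine` and "fc_output" are hypothetical; pass layer names as a list.
    outputs = machine.getLayerOutputs(["fc_output"])

    # Before this patch: outputs["fc_output"] was the raw numpy activation
    # matrix. After this patch: it is a dict of Argument fields, so the
    # activation matrix must be read from the 'value' key first.
    activations = outputs["fc_output"]["value"]
    print(activations.mean(0))  # row-wise mean, as the two demo scripts compute

This is the same adjustment the two demo hunks above make, switching
output[name].mean(0) to output[name]['value'].mean(0).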