Commit 561c4562 authored by zlx

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into improve_pruning

+group: deprecated-2017Q2
 language: cpp
 cache:
   directories:
......
@@ -99,3 +99,12 @@ value_printer
 .. automodule:: paddle.v2.evaluator
     :members: value_printer
     :noindex:
+
+Detection
+=========
+
+detection_map
+-------------
+.. automodule:: paddle.v2.evaluator
+    :members: detection_map
+    :noindex:
@@ -5,18 +5,35 @@ import (
 	"net/http"
 	"net/rpc"
 	"strconv"
+	"time"

 	"github.com/namsral/flag"

 	"github.com/PaddlePaddle/Paddle/go/pserver"
+	log "github.com/sirupsen/logrus"
 )

 func main() {
 	port := flag.Int("port", 0, "port of the pserver")
+	etcdEndpoint := flag.String("etcd-endpoint", "http://127.0.0.1:2379",
+		"comma separated endpoint string for pserver to connect to etcd")
+	etcdTimeout := flag.Int("etcd-timeout", 5, "timeout for etcd calls")
+	logLevel := flag.String("log-level", "info",
+		"log level, possible values: debug, info, warning, error, fatal, panic")
 	flag.Parse()

-	s := pserver.NewService()
-	err := rpc.Register(s)
+	level, err := log.ParseLevel(*logLevel)
+	if err != nil {
+		panic(err)
+	}
+	log.SetLevel(level)
+
+	timeout := time.Second * time.Duration((*etcdTimeout))
+	s, err := pserver.NewService(*etcdEndpoint, timeout)
+	if err != nil {
+		panic(err)
+	}
+	err = rpc.Register(s)
 	if err != nil {
 		panic(err)
 	}
@@ -27,7 +44,9 @@ func main() {
 		panic(err)
 	}

+	log.Infof("start pserver at port %d", *port)
 	err = http.Serve(l, nil)
 	if err != nil {
 		panic(err)
 	}
......
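The new main.go exposes the service through Go's standard net/rpc over HTTP. For illustration, here is a minimal client sketch against a pserver started with `-port 8080`; the method name `Service.SendGrad` and the integer reply placeholder are assumptions based on the tests in this commit, not something this diff defines:

```go
package main

import (
	"fmt"
	"log"
	"net/rpc"

	"github.com/PaddlePaddle/Paddle/go/pserver"
)

func main() {
	// Assumes the pserver registered its methods on net/rpc's default
	// HTTP handler and is listening on port 8080.
	client, err := rpc.DialHTTP("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// An uninitialized pserver is expected to reject this call; we only
	// demonstrate the wire-level calling convention here.
	var dummy int
	err = client.Call("Service.SendGrad", pserver.Gradient{}, &dummy)
	fmt.Println("SendGrad:", err)
}
```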
@@ -7,6 +7,7 @@ import (
 	"strconv"
 	"strings"
 	"testing"
+	"time"

 	"github.com/PaddlePaddle/Paddle/go/pserver"
 )
@@ -30,9 +31,12 @@ func init() {
 		port[i] = p

 		go func(l net.Listener) {
-			s := pserver.NewService()
+			s, err := pserver.NewService("", time.Second*5)
+			if err != nil {
+				panic(err)
+			}
 			server := rpc.NewServer()
-			err := server.Register(s)
+			err = server.Register(s)
 			if err != nil {
 				panic(err)
 			}
......
 package pserver

 import (
+	"context"
 	"errors"
 	"fmt"
+	"strconv"
+	"strings"
 	"sync"
+	"time"

+	"github.com/PaddlePaddle/Paddle/go/utils/networkhelper"
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3/concurrency"
+	log "github.com/sirupsen/logrus"
 )

 // ElementType is the type of elements of a Parameter.
@@ -24,6 +33,9 @@ const (
 	Float64
 )

+// PsDesired is the etcd path under which the desired pserver count is stored.
+const PsDesired = "/ps_desired"
+
 // Parameter is a piece of data to sync with the parameter server.
 type Parameter struct {
 	Name string
@@ -47,14 +59,121 @@ type Service struct {
 	mu       sync.Mutex
 	opt      *optimizer
 	paramMap map[string]Parameter
+
+	etcdEndpoints string
+	etcdClient    *clientv3.Client
+	// etcdTimeout is also used as the retry interval.
+	etcdTimeout time.Duration
+	// desired number of pservers in the job.
+	// assume desired will not change during one training job.
+	desired int
+	// FIXME: ensure GetExternalIP gets the correct ip for trainers to connect.
+	externalIP string
 }

-// NewService creates a new service.
-func NewService() *Service {
+// NewService creates a new service. It bypasses etcd registration if no
+// endpoints are specified.
+func NewService(endpoints string, timeout time.Duration) (*Service, error) {
 	s := &Service{opt: newOptimizer(sgd, 0.005)}
 	s.paramMap = make(map[string]Parameter)
 	s.initialized = make(chan struct{})
-	return s
+	s.etcdEndpoints = endpoints
+	s.etcdTimeout = timeout
+
+	var err error
+	s.externalIP, err = networkhelper.GetExternalIP()
+	if err != nil {
+		return nil, err
+	}
+
+	if endpoints != "" {
+		// initialize the connection to etcd, retrying until it succeeds
+		ep := strings.Split(s.etcdEndpoints, ",")
+		for {
+			cli, err := clientv3.New(clientv3.Config{
+				Endpoints:   ep,
+				DialTimeout: s.etcdTimeout,
+			})
+			if err != nil {
+				log.Errorf("connect to etcd error: %v", err)
+				time.Sleep(s.etcdTimeout)
+				continue
+			}
+			s.etcdClient = cli
+			log.Debugf("initialized etcd client to %s", s.etcdEndpoints)
+			break
+		}
+		// wait for the desired pserver count and record it in s.desired
+		for {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			resp, err := s.etcdClient.Get(ctx, PsDesired)
+			cancel()
+			if err != nil {
+				log.Errorf("getting %s error: %v", PsDesired, err)
+				time.Sleep(s.etcdTimeout)
+				continue
+			}
+			if len(resp.Kvs) != 0 {
+				s.desired, err = strconv.Atoi(string(resp.Kvs[0].Value))
+				if err != nil {
+					log.Errorf("value of %s invalid: %v\n", PsDesired, err)
+					time.Sleep(s.etcdTimeout)
+					// NOTE: wait until the ps_desired value changes
+					continue
+				}
+				break
+			}
+		}
+		// try to register this pserver node on etcd
+		for {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			_, err := s.registerPserverEtcd(ctx)
+			cancel()
+			if err != nil {
+				log.Warn(err)
+				time.Sleep(s.etcdTimeout)
+				continue
+			}
+			break
+		}
+	} // if endpoints != ""
+	// etcd registration is bypassed when no endpoints were specified.
+
+	return s, nil
+}
+
+// registerPserverEtcd registers the pserver node on etcd using a transaction.
+func (s *Service) registerPserverEtcd(ctx context.Context) (*clientv3.TxnResponse, error) {
+	return concurrency.NewSTM(s.etcdClient, func(c concurrency.STM) error {
+		registered := false
+		for i := 0; i < s.desired; i++ {
+			psKey := "/ps/" + strconv.Itoa(i)
+			log.Debugf("checking %s", psKey)
+			ps := c.Get(psKey)
+			log.Debugf("got value (%s) for key: %s", ps, psKey)
+			if ps == "" {
+				resp, err := s.etcdClient.Grant(context.TODO(), 5)
+				if err != nil {
+					log.Fatal(err)
+				}
+				// found the first free id; write this node's info there
+				c.Put(psKey, s.externalIP, clientv3.WithLease(resp.ID))
+				log.Debugf("set pserver node %s with value %s", psKey, s.externalIP)
+				_, kaerr := s.etcdClient.KeepAlive(context.TODO(), resp.ID)
+				if kaerr != nil {
+					log.Errorf("keepalive etcd node error: %v", kaerr)
+					return kaerr
+				}
+				log.Debug("register finished")
+				registered = true
+				break
+			}
+		}
+		if registered {
+			return nil
+		}
+		return errors.New("not registered, possibly because there are already enough pservers")
+	}, concurrency.WithAbortContext(ctx), concurrency.WithIsolation(concurrency.RepeatableReads))
 }

 // InitParam initializes a parameter.
......
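NewService above blocks until the `/ps_desired` key exists, and registerPserverEtcd then claims the first free `/ps/<i>` slot under a 5-second lease that it keeps alive, so a crashed pserver's entry expires on its own. The counterpart that writes `/ps_desired` (presumably the master) is not part of this diff; the following sketch shows what it might look like with the same clientv3 API, with the count of 3 chosen arbitrarily:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Publish the desired pserver count; every pserver blocks in
	// NewService until this key exists.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err = cli.Put(ctx, "/ps_desired", "3")
	cancel()
	if err != nil {
		log.Fatal(err)
	}

	// List the pservers that have registered so far. Each /ps/<i> entry
	// is bound to a lease kept alive by its owner.
	resp, err := cli.Get(context.Background(), "/ps/", clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```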
@@ -10,12 +10,15 @@ import (
 )

 func TestFull(t *testing.T) {
-	s := pserver.NewService()
+	s, err := pserver.NewService("", time.Second*5)
+	if err != nil {
+		t.Error(err)
+	}
 	var p pserver.Parameter
 	p.Name = "param_a"
 	p.Content = []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0}
 	p.ElementType = pserver.Int32
-	err := s.InitParam(pserver.ParameterWithConfig{Param: p, Config: nil}, nil)
+	err = s.InitParam(pserver.ParameterWithConfig{Param: p, Config: nil}, nil)
 	if err != nil {
 		t.FailNow()
 	}
@@ -72,8 +75,11 @@ func TestFull(t *testing.T) {
 }

 func TestMultipleInit(t *testing.T) {
-	s := pserver.NewService()
-	err := s.FinishInitParams(0, nil)
+	s, err := pserver.NewService("", time.Second*5)
+	if err != nil {
+		t.Error(err)
+	}
+	err = s.FinishInitParams(0, nil)
 	if err != nil {
 		t.FailNow()
 	}
@@ -85,15 +91,18 @@ func TestMultipleInit(t *testing.T) {
 }

 func TestUninitialized(t *testing.T) {
-	s := pserver.NewService()
-	err := s.SendGrad(pserver.Gradient{}, nil)
+	s, err := pserver.NewService("", time.Second*5)
+	err = s.SendGrad(pserver.Gradient{}, nil)
 	if err.Error() != pserver.Uninitialized {
 		t.FailNow()
 	}
 }

 func TestBlockUntilInitialized(t *testing.T) {
-	s := pserver.NewService()
+	s, err := pserver.NewService("", time.Second*5)
+	if err != nil {
+		t.Error(err)
+	}
 	ch := make(chan struct{}, 2)
 	errCh := make(chan error, 2)
 	var wg sync.WaitGroup
@@ -133,7 +142,7 @@ func TestBlockUntilInitialized(t *testing.T) {
 	p.Name = "param_a"
 	p.Content = []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0}
 	p.ElementType = pserver.Int32
-	err := s.InitParam(pserver.ParameterWithConfig{Param: p, Config: nil}, nil)
+	err = s.InitParam(pserver.ParameterWithConfig{Param: p, Config: nil}, nil)
 	if err != nil {
 		t.FailNow()
 	}
......
package networkhelper
import (
"errors"
"net"
)
// GetExternalIP returns the IP address of a local network interface that is
// not the loopback device.
func GetExternalIP() (string, error) {
ifaces, err := net.Interfaces()
if err != nil {
return "", err
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil {
return "", err
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String(), nil
}
}
return "", errors.New("are you connected to the network?")
}
package networkhelper
import "testing"
func TestGetIP(t *testing.T) {
_, err := GetExternalIP()
if err != nil {
t.Errorf("GetExternalIP returned an error: %v", err)
}
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "Evaluator.h"
#include "paddle/gserver/layers/DetectionUtil.h"
using std::map;
using std::vector;
using std::pair;
using std::make_pair;
namespace paddle {
/**
* @brief detection map Evaluator
*
* The config file api is detection_map_evaluator.
*/
class DetectionMAPEvaluator : public Evaluator {
public:
DetectionMAPEvaluator()
: evaluateDifficult_(false), cpuOutput_(nullptr), cpuLabel_(nullptr) {}
virtual void start() {
Evaluator::start();
allTruePos_.clear();
allFalsePos_.clear();
numPos_.clear();
}
virtual real evalImp(std::vector<Argument>& arguments) {
overlapThreshold_ = config_.overlap_threshold();
backgroundId_ = config_.background_id();
evaluateDifficult_ = config_.evaluate_difficult();
apType_ = config_.ap_type();
MatrixPtr detectTmpValue = arguments[0].value;
Matrix::resizeOrCreate(cpuOutput_,
detectTmpValue->getHeight(),
detectTmpValue->getWidth(),
false,
false);
MatrixPtr labelTmpValue = arguments[1].value;
Matrix::resizeOrCreate(cpuLabel_,
labelTmpValue->getHeight(),
labelTmpValue->getWidth(),
false,
false);
cpuOutput_->copyFrom(*detectTmpValue);
cpuLabel_->copyFrom(*labelTmpValue);
Argument label = arguments[1];
const int* labelIndex = label.sequenceStartPositions->getData(false);
size_t batchSize = label.getNumSequences();
vector<map<size_t, vector<NormalizedBBox>>> allGTBBoxes;
vector<map<size_t, vector<pair<real, NormalizedBBox>>>> allDetectBBoxes;
for (size_t n = 0; n < batchSize; ++n) {
map<size_t, vector<NormalizedBBox>> bboxes;
for (int i = labelIndex[n]; i < labelIndex[n + 1]; ++i) {
vector<NormalizedBBox> bbox;
getBBoxFromLabelData(cpuLabel_->getData() + i * 6, 1, bbox);
int c = cpuLabel_->getData()[i * 6];
bboxes[c].push_back(bbox[0]);
}
allGTBBoxes.push_back(bboxes);
}
size_t n = 0;
const real* cpuOutputData = cpuOutput_->getData();
for (size_t imgId = 0; imgId < batchSize; ++imgId) {
map<size_t, vector<pair<real, NormalizedBBox>>> bboxes;
size_t curImgId = static_cast<size_t>((cpuOutputData + n * 7)[0]);
while (n < cpuOutput_->getHeight() && curImgId == imgId) {
  vector<real> label;
  vector<real> score;
  vector<NormalizedBBox> bbox;
  getBBoxFromDetectData(cpuOutputData + n * 7, 1, label, score, bbox);
  bboxes[label[0]].push_back(make_pair(score[0], bbox[0]));
  ++n;
  // guard: only read the next image id while n is still inside the matrix
  if (n < cpuOutput_->getHeight())
    curImgId = static_cast<size_t>((cpuOutputData + n * 7)[0]);
}
allDetectBBoxes.push_back(bboxes);
}
for (size_t n = 0; n < batchSize; ++n) {
for (map<size_t, vector<NormalizedBBox>>::iterator it =
allGTBBoxes[n].begin();
it != allGTBBoxes[n].end();
++it) {
size_t count = 0;
if (evaluateDifficult_) {
count = it->second.size();
} else {
for (size_t i = 0; i < it->second.size(); ++i)
if (!(it->second[i].isDifficult)) ++count;
}
if (numPos_.find(it->first) == numPos_.end() && count != 0) {
numPos_[it->first] = count;
} else {
numPos_[it->first] += count;
}
}
}
// calcTFPos
calcTFPos(batchSize, allGTBBoxes, allDetectBBoxes);
return 0;
}
virtual void printStats(std::ostream& os) const {
real mAP = calcMAP();
os << "Detection mAP=" << mAP;
}
virtual void distributeEval(ParameterClient2* client) {
LOG(FATAL) << "Distribute detection evaluation not implemented.";
}
protected:
void calcTFPos(const size_t batchSize,
const vector<map<size_t, vector<NormalizedBBox>>>& allGTBBoxes,
const vector<map<size_t, vector<pair<real, NormalizedBBox>>>>&
allDetectBBoxes) {
for (size_t n = 0; n < allDetectBBoxes.size(); ++n) {
if (allGTBBoxes[n].size() == 0) {
for (map<size_t, vector<pair<real, NormalizedBBox>>>::const_iterator
it = allDetectBBoxes[n].begin();
it != allDetectBBoxes[n].end();
++it) {
size_t label = it->first;
for (size_t i = 0; i < it->second.size(); ++i) {
allTruePos_[label].push_back(make_pair(it->second[i].first, 0));
allFalsePos_[label].push_back(make_pair(it->second[i].first, 1));
}
}
} else {
for (map<size_t, vector<pair<real, NormalizedBBox>>>::const_iterator
it = allDetectBBoxes[n].begin();
it != allDetectBBoxes[n].end();
++it) {
size_t label = it->first;
vector<pair<real, NormalizedBBox>> predBBoxes = it->second;
if (allGTBBoxes[n].find(label) == allGTBBoxes[n].end()) {
for (size_t i = 0; i < predBBoxes.size(); ++i) {
allTruePos_[label].push_back(make_pair(predBBoxes[i].first, 0));
allFalsePos_[label].push_back(make_pair(predBBoxes[i].first, 1));
}
} else {
vector<NormalizedBBox> gtBBoxes =
allGTBBoxes[n].find(label)->second;
vector<bool> visited(gtBBoxes.size(), false);
// Sort detections in descend order based on scores
std::sort(predBBoxes.begin(),
predBBoxes.end(),
sortScorePairDescend<NormalizedBBox>);
for (size_t i = 0; i < predBBoxes.size(); ++i) {
real maxOverlap = -1.0;
size_t maxIdx = 0;
for (size_t j = 0; j < gtBBoxes.size(); ++j) {
real overlap =
jaccardOverlap(predBBoxes[i].second, gtBBoxes[j]);
if (overlap > maxOverlap) {
maxOverlap = overlap;
maxIdx = j;
}
}
if (maxOverlap > overlapThreshold_) {
if (evaluateDifficult_ ||
(!evaluateDifficult_ && !gtBBoxes[maxIdx].isDifficult)) {
if (!visited[maxIdx]) {
allTruePos_[label].push_back(
make_pair(predBBoxes[i].first, 1));
allFalsePos_[label].push_back(
make_pair(predBBoxes[i].first, 0));
visited[maxIdx] = true;
} else {
allTruePos_[label].push_back(
make_pair(predBBoxes[i].first, 0));
allFalsePos_[label].push_back(
make_pair(predBBoxes[i].first, 1));
}
}
} else {
allTruePos_[label].push_back(make_pair(predBBoxes[i].first, 0));
allFalsePos_[label].push_back(
make_pair(predBBoxes[i].first, 1));
}
}
}
}
}
}
}
real calcMAP() const {
real mAP = 0.0;
size_t count = 0;
for (map<size_t, size_t>::const_iterator it = numPos_.begin();
it != numPos_.end();
++it) {
size_t label = it->first;
size_t labelNumPos = it->second;
if (labelNumPos == 0 || allTruePos_.find(label) == allTruePos_.end())
continue;
vector<pair<real, size_t>> labelTruePos = allTruePos_.find(label)->second;
vector<pair<real, size_t>> labelFalsePos =
allFalsePos_.find(label)->second;
// Compute average precision.
vector<size_t> tpCumSum;
getAccumulation(labelTruePos, &tpCumSum);
vector<size_t> fpCumSum;
getAccumulation(labelFalsePos, &fpCumSum);
std::vector<real> precision, recall;
size_t num = tpCumSum.size();
// Compute Precision.
for (size_t i = 0; i < num; ++i) {
CHECK_LE(tpCumSum[i], labelNumPos);
precision.push_back(static_cast<real>(tpCumSum[i]) /
static_cast<real>(tpCumSum[i] + fpCumSum[i]));
recall.push_back(static_cast<real>(tpCumSum[i]) / labelNumPos);
}
// VOC2007 style
if (apType_ == "11point") {
vector<real> maxPrecisions(11, 0.0);
int startIdx = num - 1;
for (int j = 10; j >= 0; --j)
for (int i = startIdx; i >= 0; --i) {
if (recall[i] < j / 10.) {
startIdx = i;
if (j > 0) maxPrecisions[j - 1] = maxPrecisions[j];
break;
} else {
if (maxPrecisions[j] < precision[i])
maxPrecisions[j] = precision[i];
}
}
for (int j = 10; j >= 0; --j) mAP += maxPrecisions[j] / 11;
++count;
} else if (apType_ == "Integral") {
// Natural integral (area under the precision-recall curve)
real averagePrecisions = 0.;
real prevRecall = 0.;
for (size_t i = 0; i < num; ++i) {
if (fabs(recall[i] - prevRecall) > 1e-6)
averagePrecisions += precision[i] * fabs(recall[i] - prevRecall);
prevRecall = recall[i];
}
mAP += averagePrecisions;
++count;
} else {
LOG(FATAL) << "Unkown ap version: " << apType_;
}
}
if (count != 0) mAP /= count;
return mAP * 100;
}
void getAccumulation(vector<pair<real, size_t>> inPairs,
vector<size_t>* accuVec) const {
std::stable_sort(
inPairs.begin(), inPairs.end(), sortScorePairDescend<size_t>);
accuVec->clear();
size_t sum = 0;
for (size_t i = 0; i < inPairs.size(); ++i) {
sum += inPairs[i].second;
accuVec->push_back(sum);
}
}
std::string getTypeImpl() const { return "detection_map"; }
real getValueImpl() const { return calcMAP(); }
private:
real overlapThreshold_;   // overlap threshold for determining a matched bbox
bool evaluateDifficult_;  // whether to evaluate difficult ground truths
size_t backgroundId_;     // class index of the background
std::string apType_;      // how to calculate mAP ("Integral" or "11point")
MatrixPtr cpuOutput_;
MatrixPtr cpuLabel_;
map<size_t, size_t> numPos_;  // count of ground-truth objects per class
map<size_t, vector<pair<real, size_t>>>
    allTruePos_;   // true positive predictions per class
map<size_t, vector<pair<real, size_t>>>
    allFalsePos_;  // false positive predictions per class
};
REGISTER_EVALUATOR(detection_map, DetectionMAPEvaluator);
} // namespace paddle
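For reference, the VOC2007 "11point" branch of calcMAP above takes, for each recall threshold t in {0, 0.1, ..., 1.0}, the maximum precision among all points whose recall is at least t, and averages the 11 values; the backward scan over startIdx is just an optimization of that rule. A self-contained sketch of the unoptimized form (in Go, with toy numbers; an illustration, not PaddlePaddle code):

```go
package main

import "fmt"

// elevenPointAP computes VOC2007-style interpolated average precision:
// for each recall threshold t in {0, 0.1, ..., 1.0}, take the maximum
// precision over all points with recall >= t, then average the 11 values.
func elevenPointAP(precision, recall []float64) float64 {
	sum := 0.0
	for j := 0; j <= 10; j++ {
		t := float64(j) / 10.0
		maxP := 0.0
		for i := range recall {
			if recall[i] >= t && precision[i] > maxP {
				maxP = precision[i]
			}
		}
		sum += maxP
	}
	return sum / 11.0
}

func main() {
	// Toy precision/recall curve ordered by descending detection score,
	// as produced by the cumulative-sum loop in calcMAP.
	p := []float64{1.0, 1.0, 0.67, 0.75, 0.6}
	r := []float64{0.2, 0.4, 0.4, 0.6, 0.6}
	fmt.Printf("11-point AP: %.4f\n", elevenPointAP(p, r))
}
```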
@@ -241,11 +241,14 @@ void NeuralNetwork::forward(const std::vector<Argument>& inArgs,
     dataLayers_[i]->setData(inArgs[i]);
   }

+  gLayerStackTrace.set_stage(true);
   {
     for (auto& layer : layers_) {
       REGISTER_TIMER_INFO("ForwardTimer", layer->getName().c_str());
       gLayerStackTrace.push(layer->getName());
       layer->forward(passType);
+      gLayerStackTrace.pop(layer->getName());
     }
   }
@@ -254,9 +257,6 @@ void NeuralNetwork::forward(const std::vector<Argument>& inArgs,
   for (auto& layer : outputLayers_) {
     outArgs->push_back(layer->getOutput());
   }
-  if (passType == PASS_TEST) {
-    gLayerStackTrace.clear();
-  }
 }

 void NeuralNetwork::resetState() {
@@ -283,9 +283,10 @@ void NeuralNetwork::getState(MachineState& machineState) {
 }

 void NeuralNetwork::backward(const UpdateCallback& callback) {
-  gLayerStackTrace.pop("");  // tell layer trace is during backward.
+  gLayerStackTrace.set_stage(false);
   FOR_EACH_R(layer, layers_) {
     REGISTER_TIMER_INFO("BackwardTimer", (*layer)->getName().c_str());
+    gLayerStackTrace.push((*layer)->getName());
     if ((*layer)->needGradient()) {
       (*layer)->backward(callback);
     }
@@ -320,7 +321,7 @@ public:
     }
   }

-  virtual void eval(const NeuralNetwork& nn) {
+  virtual void eval(const NeuralNetwork& nn) override {
     for (auto& evaluator : evaluators_) {
       evaluator->eval(nn);
     }
@@ -395,6 +396,30 @@ private:
   }
 };

+class SubnetEvaluator : public CombinedEvaluator {
+public:
+  SubnetEvaluator(const std::string& layerName,
+                  std::unique_ptr<Evaluator>&& evaluator)
+      : layerName_(layerName) {
+    addEvaluator(std::move(evaluator));
+  }
+
+  virtual void eval(const NeuralNetwork& nn) override {
+    const LayerPtr& layer = nn.getLayer(layerName_);
+    CHECK(layer) << "Nonexistent layer: " << layerName_ << " in submodel "
+                 << nn.getName();
+    bool accessed = false;
+    layer->accessSubNetwork([this, &accessed](NeuralNetwork& subnet) {
+      subnet.eval(evaluators_[0].get());
+      accessed = true;
+    });
+    CHECK(accessed) << "There is no subnetwork for layer " << layerName_
+                    << " in submodel " << nn.getName();
+  }
+
+protected:
+  std::string layerName_;
+};
+
 Evaluator* NeuralNetwork::makeEvaluator() const {
   CombinedEvaluator* combinedEvaluator = new CombinedEvaluator();
   auto subModelConfig = std::find_if(config_.sub_models().begin(),
@@ -421,6 +446,15 @@ Evaluator* NeuralNetwork::makeEvaluator() const {
       combinedEvaluator->addEvaluator(std::move(evaluator));
     }
   }
+  for (auto& layer : layers_) {
+    layer->accessSubNetwork(
+        [layer, combinedEvaluator](NeuralNetwork& subnet) {
+          std::unique_ptr<Evaluator> subEvaluator(new SubnetEvaluator(
+              layer->getName(),
+              std::unique_ptr<Evaluator>(subnet.makeEvaluator())));
+          combinedEvaluator->addEvaluator(std::move(subEvaluator));
+        });
+  }
 } else {
   for (const EvaluatorConfig& evalConfig : config_.evaluators()) {
     std::unique_ptr<Evaluator> evaluator(Evaluator::create(evalConfig));
......
@@ -129,6 +129,8 @@ public:
   static NeuralNetwork* newNeuralNetwork(const std::string& name = "",
                                          NeuralNetwork* rootNetwork = nullptr);

+  const std::string& getName() const { return subModelName_; }
+
 protected:
   /**
    * The constructor of NeuralNetwork.
......
@@ -208,6 +208,7 @@ void RecurrentGradientMachine::init(
   });
   CHECK(subModelConfig != config.sub_models().end());
   reversed_ = subModelConfig->reversed();
+  generating_ = subModelConfig->has_generator();

   inFrameLines_.resize(subModelConfig->in_links_size());
   for (size_t i = 0; i < inFrameLines_.size(); ++i) {
@@ -287,10 +288,6 @@ void RecurrentGradientMachine::init(
       parameterIds_.push_back(para->getID());
     }
   }
-
-  if (subModelConfig->evaluator_names_size() > 0) {
-    evaluator_.reset(frames_[0]->makeEvaluator());
-  }
 }

 void RecurrentGradientMachine::resizeOrCreateFrames(int numFrames) {
@@ -538,7 +535,7 @@ void RecurrentGradientMachine::forward(const std::vector<Argument>& inArgs,
     The outputs are outFramesLines_[i].agentLayer
   */

-  if (inFrameLines_.empty() && passType == PASS_TEST) {
+  if (generating_) {
     generateSequence();
     return;
   }  // else forward..
@@ -561,14 +558,14 @@ void RecurrentGradientMachine::forward(const std::vector<Argument>& inArgs,
     std::vector<Argument> outArgs;
     frames_[i]->forward(inArgs, &outArgs, passType);
   }
-  if (evaluator_ && passType == PASS_TEST) {
-    this->eval(evaluator_.get());
-  }

   reorganizeOutput(passType);
 }

 void RecurrentGradientMachine::backward(const UpdateCallback& callback) {
+  if (generating_) {
+    return;
+  }
   REGISTER_TIMER_INFO("RecurrentBwTime", "RecurrentBwTime");
   AsyncGpuBlock asyncGpuBlock;
   for (int i = maxSequenceLength_ - 1; i >= 0; --i) {
@@ -577,11 +574,6 @@ void RecurrentGradientMachine::backward(const UpdateCallback& callback) {
   for (auto& memoryFrameLine : memoryFrameLines_) {
     memoryFrameLine.bootLayer->backward(nullptr);
   }
-
-  // call printers here so the gradient can be printed
-  if (evaluator_) {
-    this->eval(evaluator_.get());
-  }
 }

 void RecurrentGradientMachine::forwardBackward(
@@ -595,9 +587,9 @@ void RecurrentGradientMachine::forwardBackward(
 void RecurrentGradientMachine::eval(Evaluator* evaluator) const {
   // call printers frame by frame
   for (int i = 0; i < maxSequenceLength_; ++i) {
-    LOG(INFO) << "Recurrent Layer Group eval frame " << i << " begin";
+    VLOG(2) << "Recurrent Layer Group eval frame " << i << " begin";
     evaluator->eval(*(frames_[i].get()));
-    LOG(INFO) << "Recurrent Layer Group eval frame " << i << " end";
+    VLOG(2) << "Recurrent Layer Group eval frame " << i << " end";
   }
 }
@@ -1093,10 +1085,6 @@ void RecurrentGradientMachine::oneWaySearch(size_t batchSize) {
     copyDataOutlinkFrame(machineCur);

-    // call value printer
-    if (evaluator_) {
-      evaluator_->eval(*(frames_[machineCur].get()));
-    }
     // check eos
     const IVectorPtr& eosVec =
         eosFrameLine_->layers[machineCur]->getOutput().ids;
@@ -1321,11 +1309,10 @@ void RecurrentGradientMachine::fillGenOutputs() {
   batchMachineIdVec_.clear();
   generator_.ids.clear();
+  int* starts = generator_.outArg.sequenceStartPositions->getMutableData(false);
+  starts[0] = 0;
   if (numResults > 1) {
     real* probs = generator_.outArg.in->getData();
-    int* starts =
-        generator_.outArg.sequenceStartPositions->getMutableData(false);
-    starts[0] = 0;
     for (size_t i = 0; i < finalPaths_.size(); ++i) {
       for (size_t j = 0; j < finalPaths_[i].size(); ++j) {
         Path& path = finalPaths_[i][j];
@@ -1348,7 +1335,10 @@ void RecurrentGradientMachine::fillGenOutputs() {
   } else {
     for (size_t i = 0; i < finalPaths_.size(); ++i) {
       CHECK(!finalPaths_[i].empty());
-      generator_.ids = finalPaths_[i][0].ids;
+      generator_.ids.insert(generator_.ids.begin(),
+                            finalPaths_[i][0].ids.begin(),
+                            finalPaths_[i][0].ids.end());
+      starts[i + 1] = starts[i] + finalPaths_[i][0].ids.size();
     }
   }
 }
......
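The fillGenOutputs change above hoists the sequenceStartPositions bookkeeping out of the `numResults > 1` branch so that the single-result path also fills `starts[]` while concatenating each sequence's ids. The invariant it maintains is `starts[0] = 0` and `starts[i+1] = starts[i] + len(ids_i)`; a standalone sketch of that flattening, for illustration only:

```go
package main

import "fmt"

// flatten concatenates variable-length id sequences and records start
// offsets, mirroring the starts[] bookkeeping in fillGenOutputs:
// starts[0] = 0, starts[i+1] = starts[i] + len(seqs[i]).
func flatten(seqs [][]int) (ids []int, starts []int) {
	starts = make([]int, len(seqs)+1)
	for i, s := range seqs {
		starts[i+1] = starts[i] + len(s)
		ids = append(ids, s...)
	}
	return ids, starts
}

func main() {
	ids, starts := flatten([][]int{{3, 1, 4}, {1, 5}, {9, 2, 6, 5}})
	fmt.Println(ids)    // [3 1 4 1 5 9 2 6 5]
	fmt.Println(starts) // [0 3 5 9]
}
```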
@@ -414,6 +414,7 @@ protected:
     std::vector<int> ids;  // store generated sequences
     Argument outArg;       // final output argument
   };
+  bool generating_;
   Generator generator_;

   std::vector<std::unique_ptr<NeuralNetwork>> frames_;
@@ -428,8 +429,6 @@ protected:
   std::vector<int>
       parameterIds_;  // parameters actually used by this Layer Group

-  std::unique_ptr<Evaluator> evaluator_;  // frame printers in this layer group
-
   // store final argument of outFrameLines_
   std::vector<Argument> dataArgs_;
   // store each frame's output argument of outFrameLines_
......
@@ -109,6 +109,40 @@ void GatherAgentLayer::forwardValue(PassType passType) {
   }
 }

+namespace {
+
+// dest[index[i]] <- src[i] for each i
+void copyElements(const IVector& srcVec,
+                  const IVector& indexVec,
+                  IVector& destVec) {
+  const int* src = srcVec.getData();
+  const int* index = indexVec.getData();
+  int* dest = destVec.getData();
+  int len = indexVec.getSize();
+  CHECK_EQ(srcVec.getSize(), indexVec.getSize());
+  for (int i = 0; i < len; ++i) {
+    dest[index[i]] = src[i];
+  }
+}
+}
+
+void GatherAgentLayer::forwardIds(PassType passType) {
+  IVectorPtr realId = realLayers_[0]->getOutputLabel();
+  if (!realId) return;
+
+  IVector::resizeOrCreate(output_.ids, allIds_->getSize(), useGpu_);
+  IVectorPtr outId = output_.ids;
+  idsVec_.resize(idIndex_.size());
+
+  for (size_t i = 0; i < realLayers_.size(); ++i) {
+    const IVectorPtr& realId = realLayers_[i]->getOutputLabel();
+    idsVec_[i] = IVector::create(allIds_->getData() + idIndex_[i],
+                                 /* size */ realId->getSize(),
+                                 useGpu_);
+    execViaCpu(&copyElements, *realId, *idsVec_[i], *outId);
+  }
+}
+
 void GatherAgentLayer::backward(const UpdateCallback& callback) {
   (void)callback;
   const MatrixPtr& outputGrad = getOutputGrad();
@@ -136,23 +170,22 @@ void ScatterAgentLayer::forward(PassType passType) {
   CHECK_EQ(realLayer_->getDeviceId(), this->getDeviceId());

   int width = this->getSize();
-  if (realOutArg_.hasSeq()) {
-    forwardSequence(passType);
-  } else if (realOutArg_.value || realOutArg_.ids) {
-    output_.subArgFrom(
-        realOutArg_, /* offset */ idIndex_, idSize_, width, useGpu_);
-  } else {  // used in generation
-    if (realLayer_->getOutput().ids) {
-      IVector::resizeOrCreate(output_.ids, ids_->getSize(), useGpu_);
-      output_.ids->selectFrom(*realLayer_->getOutput().ids, *ids_);
-    }
-    if (realLayer_->getOutput().value) {
-      int height = ids_->getSize();
-      resetOutput(height, width);
-
-      const MatrixPtr& outV = getOutputValue();
-      const MatrixPtr& realV = realLayer_->getOutputValue();
-      outV->selectRows(*realV, *ids_);
+  if (selectionMode_) {
+    forwardWithSelection(passType);
+  } else {
+    if (realOutArg_.hasSeq()) {
+      output_.subArgFrom(realOutArg_,
+                         /* offset */ idIndex_,
+                         idSize_,
+                         width,
+                         useGpu_,
+                         /* trans */ false,
+                         /* seqFlag */ true,
+                         /* seqStart */ seqStartPosIndex_,
+                         /* seqSize */ numSequences_);
+    } else {
+      output_.subArgFrom(
+          realOutArg_, /* offset */ idIndex_, idSize_, width, useGpu_);
     }
   }
 }
@@ -160,6 +193,8 @@ void ScatterAgentLayer::forward(PassType passType) {
 void ScatterAgentLayer::backward(const UpdateCallback& callback) {
   (void)callback;

+  CHECK(!selectionMode_);
+
   const MatrixPtr& outputGrad = realOutArg_.grad;
   const MatrixPtr& realGrad = realLayer_->getOutputGrad();
   if (realGrad) {
@@ -174,42 +209,7 @@ void ScatterAgentLayer::backward(const UpdateCallback& callback) {
 REGISTER_LAYER(gather_agent, GatherAgentLayer);
 REGISTER_LAYER(scatter_agent, ScatterAgentLayer);

-void GatherAgentLayer::forwardIds(PassType passType) {
-  int height = 0;
-  IVectorPtr idReal = realLayers_[0]->getOutputLabel();
-
-  if (!idReal) return;
-
-  if (output_.subSequenceStartPositions) {
-    int* starts = output_.subSequenceStartPositions->getMutableData(false);
-    // Gather generator.idsVec
-    // if is beam search generation result. Get first result.
-    if (idReal->getData()[idReal->getSize() - 1] == -1) {
-      for (size_t i = 0; i < realLayers_.size(); ++i) {
-        // The first element stores first result size
-        idReal = realLayers_[i]->getOutputLabel();
-        idReal->subVecFrom(*idReal, 1, idReal->getData()[0]);
-      }
-    }
-    for (size_t i = 0; i < realLayers_.size(); ++i) {
-      CHECK(realLayers_[i]->getOutputLabel());
-      starts[i] = height;
-      height += realLayers_[i]->getOutputLabel()->getSize();
-    }
-    starts[realLayers_.size()] = height;
-    output_.sequenceStartPositions->getMutableData(false)[1] = height;
-    IVector::resizeOrCreate(output_.ids, height, false);
-    for (size_t i = 0; i < realLayers_.size(); ++i) {
-      output_.ids->subVec(starts[i], starts[i + 1] - starts[i])
-          ->copyFrom(*realLayers_[i]->getOutputLabel());
-    }
-  } else {
-    LOG(FATAL) << "Not implemented";
-  }
-}
-
-void ScatterAgentLayer::forwardSequence(PassType passType) {
+void ScatterAgentLayer::forwardWithSelection(PassType passType) {
   Layer::forward(passType);
   CHECK_EQ(realLayer_->getDeviceId(), this->getDeviceId());
@@ -220,17 +220,19 @@ void ScatterAgentLayer::forwardSequence(PassType passType) {
   AsyncGpuBlock asyncGpuBlock;
   REGISTER_TIMER_INFO("SequenceAgentLayerForward", getName().c_str());

-  if (realOutArg_.value || realOutArg_.ids) {
-    CHECK(realOutArg_.sequenceStartPositions);
-    output_.subArgFrom(realOutArg_,
-                       /* offset */ idIndex_,
-                       idSize_,
-                       width,
-                       useGpu_,
-                       /* trans */ false,
-                       /* seqFlag */ true,
-                       /* seqStart */ seqStartPosIndex_,
-                       /* seqSize */ numSequences_);
+  if (!input.hasSeq()) {
+    if (realLayer_->getOutput().ids) {
+      IVector::resizeOrCreate(output_.ids, ids_->getSize(), useGpu_);
+      output_.ids->selectFrom(*realLayer_->getOutput().ids, *ids_);
+    }
+    if (realLayer_->getOutput().value) {
+      int height = ids_->getSize();
+      resetOutput(height, width);
+
+      const MatrixPtr& outV = getOutputValue();
+      const MatrixPtr& realV = realLayer_->getOutputValue();
+      outV->selectRows(*realV, *ids_);
+    }
   } else {
     // Putting the generation logic here is really an ugly hack!
     // used in generation
......
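The helper added above spells out the index convention: copyElements scatters (dest[index[i]] = src[i], used by GatherAgentLayer::forwardIds to write each real layer's ids into its slot of the merged output), while selectFrom/selectRows gather (out[i] = src[index[i]]). A plain sketch of the two primitives, for orientation only:

```go
package main

import "fmt"

// gather reads through the index: out[i] = src[index[i]]
// (what selectFrom/selectRows do in the layer code above).
func gather(src, index []int) []int {
	out := make([]int, len(index))
	for i, id := range index {
		out[i] = src[id]
	}
	return out
}

// scatter writes through the index: out[index[i]] = src[i]
// (what copyElements does above).
func scatter(src, index []int, destLen int) []int {
	out := make([]int, destLen)
	for i, id := range index {
		out[id] = src[i]
	}
	return out
}

func main() {
	src := []int{10, 20, 30}
	index := []int{2, 0, 1}
	fmt.Println(gather(src, index))     // [30 10 20]
	fmt.Println(scatter(src, index, 3)) // [20 30 10]
}
```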
@@ -110,6 +110,9 @@ protected:
   // of real layer.
   ICpuGpuVectorPtr inputStartPos_;

+  // true for setRealLayer, false for setRealLayerAndOutput
+  bool selectionMode_;
+
 public:
   explicit ScatterAgentLayer(const LayerConfig& config) : Layer(config) {}
@@ -137,6 +140,7 @@ public:
     } else {
       cpuIds_ = ids_;
     }
+    selectionMode_ = true;
   }

   // set real layer and output, [idIndex, idIndex + idSize) of *ids*
@@ -153,6 +157,7 @@ public:
     idIndex_ = idIndex;
     idSize_ = idSize;
     handleBackward_ = handleBackward;
+    selectionMode_ = false;
   }

   void setSequenceStartPositions(const ICpuGpuVectorPtr& sequenceStartPositions,
@@ -166,7 +171,7 @@ public:
   void forward(PassType passType) override;
   void backward(const UpdateCallback& callback) override;

-  void forwardSequence(PassType passType);
+  void forwardWithSelection(PassType passType);
 };

 }  // namespace paddle
@@ -138,6 +138,23 @@ void testEvaluatorAll(TestConfig testConf,
   testEvaluator(testConf, testEvaluatorName, batchSize, false);
 }

+TEST(Evaluator, detection_map) {
+  TestConfig config;
+  config.evaluatorConfig.set_type("detection_map");
+  config.evaluatorConfig.set_overlap_threshold(0.5);
+  config.evaluatorConfig.set_background_id(0);
+  config.evaluatorConfig.set_ap_type("Integral");
+  config.evaluatorConfig.set_evaluate_difficult(0);
+
+  config.inputDefs.push_back({INPUT_DATA, "output", 7});
+  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "label", 6});
+
+  config.evaluatorConfig.set_evaluate_difficult(false);
+  testEvaluatorAll(config, "detection_map", 100);
+
+  config.evaluatorConfig.set_evaluate_difficult(true);
+  testEvaluatorAll(config, "detection_map", 100);
+}
+
 TEST(Evaluator, classification_error) {
   TestConfig config;
   config.evaluatorConfig.set_type("classification_error");
......
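The widths in the new test (7 for the detection output, 6 for the label) match the per-row layouts that evalImp indexes with `n * 7` and `i * 6`. The field order below is inferred from how getBBoxFromDetectData and getBBoxFromLabelData are used in the evaluator, so treat it as an assumption rather than a documented format:

```go
package detection

// Inferred per-row layouts behind the widths used in the test above
// (detection rows have width 7, label rows width 6).
type DetectionRow struct {
	ImageID                float32 // matched against the batch image index
	Label                  float32 // predicted class id
	Score                  float32 // confidence used for AP ranking
	XMin, YMin, XMax, YMax float32 // normalized bbox corners
}

type LabelRow struct {
	Label                  float32 // ground-truth class id (read at i * 6)
	XMin, YMin, XMax, YMax float32 // normalized bbox corners
	IsDifficult            float32 // flag honored by evaluate_difficult
}
```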
@@ -35,7 +35,7 @@ def outer_step(dummy_data):
                               embedding_size=num_words)]

     def inner_step(dummy_memory, predict_word):
         # simplified RNN for testing
         with mixed_layer(size=num_words) as layer:
             layer += full_matrix_projection(input=predict_word,
@@ -46,15 +46,15 @@ def outer_step(dummy_data):
                           param_attr=ParamAttr(name="wordvec"))

         return out

     beam_gen = beam_search(name="rnn_gen",
                            step=inner_step,
                            input=gen_inputs,
                            bos_id=0,
                            eos_id=num_words-1,
                            beam_size=2 if beam_flag else 1,
-                           num_results_per_sample=2 if beam_flag else 1,
+                           num_results_per_sample=1,
                            max_length=10)
     return beam_gen

 beam_gen_concat = recurrent_group(name="rnn_gen_concat",
......
@@ -33,7 +33,7 @@ gen_inputs = [StaticInput(input=dummy_data, size=2),
               embedding_size=num_words)]

 def step(dummy_memory, predict_word):
     # simplified RNN for testing
     with mixed_layer(size=num_words) as layer:
         layer += full_matrix_projection(input=predict_word,
@@ -44,7 +44,7 @@ def step(dummy_memory, predict_word):
                       param_attr=ParamAttr(name="wordvec"))

     return out

 beam_gen = beam_search(name="rnn_gen",
                        step=step,
                        input=gen_inputs,
@@ -52,7 +52,7 @@ beam_gen = beam_search(name="rnn_gen",
                        eos_id=num_words-1,
                        beam_size=2 if beam_flag else 1,
                        num_results_per_sample=2 if beam_flag else 1,
                        max_length=10)
 seqtext_printer_evaluator(input=beam_gen,
                           id_input=sent_id,
......
@@ -55,13 +55,17 @@ public:
    * Else, just set status to popping.
    */
   void pop(const T& item) {
-    pushing() = false;
     auto& s = this->stack();
     if (item == s.top()) {
       s.pop();
     }
   }

+  /**
+   * @brief Indicate whether we are at the forward or backward stage of
+   * computation.
+   */
+  void set_stage(bool isForward) { pushing() = isForward; }
+
   /**
    * @brief clear current thread stack.
    */
......
@@ -72,7 +72,6 @@ TEST(CustomStackTrace, normalTrain) {
     for (size_t i = 0; i < layerSize; ++i) {
       tracer.push("layer_" + paddle::str::to_string(i));
     }
-    tracer.pop("");
     for (size_t i = 0; i < layerSize; ++i) {
       tracer.pop("layer_" + paddle::str::to_string(layerSize - 1 - i));
     }
......
@@ -489,6 +489,15 @@ message EvaluatorConfig {
   // Used by ClassificationErrorEvaluator
   // top # classification error
   optional int32 top_k = 13 [default = 1];
+
+  // Used by DetectionMAPEvaluator
+  optional double overlap_threshold = 14 [default = 0.5];
+
+  optional int32 background_id = 15 [default = 0];
+
+  optional bool evaluate_difficult = 16 [default = false];
+
+  optional string ap_type = 17 [default = "11point"];
 }

 message LinkConfig {
......
@@ -1280,20 +1280,23 @@ def parse_maxout(maxout, input_layer_name, maxout_conf):

 # Define an evaluator
 @config_func
-def Evaluator(
-        name,
-        type,
-        inputs,
-        chunk_scheme=None,
-        num_chunk_types=None,
-        classification_threshold=None,
-        positive_label=None,
-        dict_file=None,
-        result_file=None,
-        num_results=None,
-        top_k=None,
-        delimited=None,
-        excluded_chunk_types=None, ):
+def Evaluator(name,
+              type,
+              inputs,
+              chunk_scheme=None,
+              num_chunk_types=None,
+              classification_threshold=None,
+              positive_label=None,
+              dict_file=None,
+              result_file=None,
+              num_results=None,
+              top_k=None,
+              delimited=None,
+              excluded_chunk_types=None,
+              overlap_threshold=None,
+              background_id=None,
+              evaluate_difficult=None,
+              ap_type=None):
     evaluator = g_config.model_config.evaluators.add()
     evaluator.type = type
     evaluator.name = MakeLayerNameInSubmodel(name)
@@ -1327,6 +1330,18 @@ def Evaluator(
     if excluded_chunk_types:
         evaluator.excluded_chunk_types.extend(excluded_chunk_types)

+    if overlap_threshold is not None:
+        evaluator.overlap_threshold = overlap_threshold
+
+    if background_id is not None:
+        evaluator.background_id = background_id
+
+    if evaluate_difficult is not None:
+        evaluator.evaluate_difficult = evaluate_difficult
+
+    if ap_type is not None:
+        evaluator.ap_type = ap_type
+

 class LayerBase(object):
     def __init__(
......
@@ -21,7 +21,8 @@ __all__ = [
     "chunk_evaluator", "sum_evaluator", "column_sum_evaluator",
     "value_printer_evaluator", "gradient_printer_evaluator",
     "maxid_printer_evaluator", "maxframe_printer_evaluator",
-    "seqtext_printer_evaluator", "classification_error_printer_evaluator"
+    "seqtext_printer_evaluator", "classification_error_printer_evaluator",
+    "detection_map_evaluator"
 ]

@@ -31,10 +32,11 @@ class EvaluatorAttribute(object):
     FOR_RANK = 1 << 2
     FOR_PRINT = 1 << 3
     FOR_UTILS = 1 << 4
+    FOR_DETECTION = 1 << 5

     KEYS = [
         "for_classification", "for_regression", "for_rank", "for_print",
-        "for_utils"
+        "for_utils", "for_detection"
     ]

     @staticmethod
@@ -57,22 +59,25 @@ def evaluator(*attrs):
     return impl

-def evaluator_base(
-        input,
-        type,
-        label=None,
-        weight=None,
-        name=None,
-        chunk_scheme=None,
-        num_chunk_types=None,
-        classification_threshold=None,
-        positive_label=None,
-        dict_file=None,
-        result_file=None,
-        num_results=None,
-        delimited=None,
-        top_k=None,
-        excluded_chunk_types=None, ):
+def evaluator_base(input,
+                   type,
+                   label=None,
+                   weight=None,
+                   name=None,
+                   chunk_scheme=None,
+                   num_chunk_types=None,
+                   classification_threshold=None,
+                   positive_label=None,
+                   dict_file=None,
+                   result_file=None,
+                   num_results=None,
+                   delimited=None,
+                   top_k=None,
+                   excluded_chunk_types=None,
+                   overlap_threshold=None,
+                   background_id=None,
+                   evaluate_difficult=None,
+                   ap_type=None):
     """
     Evaluator will evaluate the network status while training/testing.
@@ -107,6 +112,14 @@ def evaluator_base(
     :type weight: LayerOutput.
     :param top_k: number k in top-k error rate
     :type top_k: int
+    :param overlap_threshold: the overlap threshold used to filter detection
+                              results in detection tasks
+    :type overlap_threshold: float
+    :param background_id: identifier of the background class
+    :type background_id: int
+    :param evaluate_difficult: whether to evaluate difficult objects
+    :type evaluate_difficult: bool
+    :param ap_type: how to calculate the average precision
+    :type ap_type: str
     """
     # inputs type assertions.
     assert classification_threshold is None or isinstance(
@@ -136,7 +149,61 @@ def evaluator_base(
         delimited=delimited,
         num_results=num_results,
         top_k=top_k,
-        excluded_chunk_types=excluded_chunk_types, )
+        excluded_chunk_types=excluded_chunk_types,
+        overlap_threshold=overlap_threshold,
+        background_id=background_id,
+        evaluate_difficult=evaluate_difficult,
+        ap_type=ap_type)
+
+
+@evaluator(EvaluatorAttribute.FOR_DETECTION)
+@wrap_name_default()
+def detection_map_evaluator(input,
+                            label,
+                            overlap_threshold=0.5,
+                            background_id=0,
+                            evaluate_difficult=False,
+                            ap_type="11point",
+                            name=None):
+    """
+    Detection mAP Evaluator. It will print the mean Average Precision (mAP)
+    for detection.
+
+    Based on the output of a detection_output layer, the detection mAP
+    evaluator counts the true positive and false positive bounding boxes and
+    integrates them to obtain the mAP.
+
+    The simple usage is:
+
+    .. code-block:: python
+
+       eval = detection_map_evaluator(input=det_output, label=lbl)
+
+    :param input: Input layer.
+    :type input: LayerOutput
+    :param label: Label layer.
+    :type label: LayerOutput
+    :param overlap_threshold: The bbox overlap threshold of a true positive.
+    :type overlap_threshold: float
+    :param background_id: The background class index.
+    :type background_id: int
+    :param evaluate_difficult: Whether to evaluate a difficult ground truth.
+    :type evaluate_difficult: bool
+    """
+    if not isinstance(input, list):
+        input = [input]
+
+    if label:
+        input.append(label)
+
+    evaluator_base(
+        name=name,
+        type="detection_map",
+        input=input,
+        label=label,
+        overlap_threshold=overlap_threshold,
+        background_id=background_id,
+        evaluate_difficult=evaluate_difficult,
+        ap_type=ap_type)
+
+
 @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
......
@@ -3839,7 +3839,8 @@ def classification_cost(input,
                         weight=None,
                         name=None,
                         evaluator=classification_error_evaluator,
-                        layer_attr=None):
+                        layer_attr=None,
+                        coeff=1.):
     """
     classification cost Layer.
@@ -3855,6 +3856,8 @@ def classification_cost(input,
     :param evaluator: Evaluator method.
     :param layer_attr: layer's extra attribute.
     :type layer_attr: ExtraLayerAttribute
+    :param coeff: The coefficient that scales the gradient in the backward pass.
+    :type coeff: float
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
@@ -3868,6 +3871,7 @@ def classification_cost(input,
         name=name,
         type="multi-class-cross-entropy",
         inputs=ipts,
+        coeff=coeff,
         **ExtraLayerAttribute.to_kwargs(layer_attr))

     def __add_evaluator__(e):
......
@@ -45,12 +45,12 @@ __all__ = ['data', 'parse_network']
 def __need_to_keep__(name):
     return name in [
         'StaticInput', 'SubsequenceInput', 'GeneratedInput', 'LayerType',
-        'layer_support'
+        'layer_support', 'BaseGeneratedInput'
     ]

 def __need_to_wrap__(name):
-    return name not in ['AggregateLevel', 'ExpandLevel']
+    return name not in ['AggregateLevel', 'ExpandLevel', 'BaseGeneratedInput']

 def __convert_name__(inname):
@@ -199,6 +199,15 @@ def __get_used_submodels__(layer_names):
     return submodel_names

+def __get_submodel_data_out_links__():
+    data_links = set()
+    for submodel in cp.g_config.model_config.sub_models:
+        for link in submodel.out_links:
+            if cp.g_layer_map[link.link_name].type == 'data':
+                data_links.add(link.link_name)
+    return data_links
+
+
 def __get_used_evaluators__(layer_names):
     evaluator_names = set()
     for e in cp.g_config.model_config.evaluators:
@@ -264,6 +273,7 @@ def parse_network(output_layers, extra_layers=None):
     submodel_names = __get_used_submodels__(layer_names)
     submodel_names.add('root')
     evaluator_names = __get_used_evaluators__(layer_names)
+    data_out_links = __get_submodel_data_out_links__()
     input_layer_names = set()
     output_layer_names = set()
@@ -279,7 +289,7 @@ def parse_network(output_layers, extra_layers=None):
             continue
         model_config.layers.extend([l])
         if l.type == 'data':
-            if l.name in model_config.output_layer_names:
+            if l.name in data_out_links:
                 """
                 In text generation, the outlink to save the generated word
                 indices is a data_layer defined in recurrent_group. This
......