Commit 5ecc1a21 authored by Yu Yang

Merge branch 'feature/unify_print_layer_evaluator' into feature/EvaluatorValue

@@ -993,12 +993,14 @@ public:
   virtual void eval(const NeuralNetwork& nn) {
     for (const std::string& name : config_.input_layers()) {
-      std::vector<std::tuple<std::string, std::string>> out;
-      auto err = nn.getLayerOutputValue(name, &out);
-      err.check();
-      for (auto& each : out) {
-        LOG(INFO) << "layer=" << name << std::get<0>(each) << ":\n"
-                  << std::get<1>(each);
-      }
+      auto& argu = nn.getLayer(name)->getOutput();
+      std::unordered_map<std::string, std::string> out;
+      argu.getValueString(&out);
+      for (auto field : {"value", "ids", "sequence pos", "sub-sequence pos"}) {
+        auto it = out.find(field);
+        if (it != out.end()) {
+          LOG(INFO) << "layer=" << name << " " << field << ":\n" << it->second;
+        }
+      }
     }
   }
......
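The loop above probes a fixed list of keys rather than iterating the map, because std::unordered_map has no stable iteration order; probing known keys keeps the logged fields in a predictable order. Below is a minimal, self-contained sketch of the same pattern; dumpFields and logFields are made-up names for illustration, not Paddle APIs.

// Self-contained sketch of the "dump to map, probe fixed keys" pattern.
// dumpFields stands in for Argument::getValueString; nothing here is Paddle API.
#include <initializer_list>
#include <iostream>
#include <string>
#include <unordered_map>

std::unordered_map<std::string, std::string> dumpFields(bool hasValue, bool hasIds) {
  std::unordered_map<std::string, std::string> out;
  // Only fields that actually exist are inserted, mirroring getValueString.
  if (hasValue) out.insert({"value", "0.1 0.2 0.3"});
  if (hasIds) out.insert({"ids", "4 8 15"});
  return out;
}

void logFields(const std::string& name,
               const std::unordered_map<std::string, std::string>& out) {
  // Probe a fixed key list so the output order is deterministic, even though
  // unordered_map iteration order is not.
  for (auto field : {"value", "ids", "sequence pos", "sub-sequence pos"}) {
    auto it = out.find(field);
    if (it != out.end()) {
      std::cout << "layer=" << name << " " << field << ":\n" << it->second << "\n";
    }
  }
}

int main() {
  logFields("fc_layer", dumpFields(/*hasValue=*/true, /*hasIds=*/true));
  return 0;
}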
@@ -405,42 +405,4 @@ NeuralNetwork* NeuralNetwork::newNeuralNetwork(const std::string& name,
   }
 }
-
-Error NeuralNetwork::getLayerOutputValue(
-    const std::string& layerName,
-    std::vector<std::tuple<std::string, std::string>>* out) const {
-  auto& layers = this->config_.layers();
-  auto it = std::find_if(
-      layers.begin(), layers.end(), [&layerName](const LayerConfig& conf) {
-        return conf.name() == layerName;
-      });
-  if (it == layers.end()) {
-    return Error("Cannot find layer %s", layerName.c_str());
-  }
-  auto& layer = this->getLayer(layerName);
-  out->reserve(4);
-  auto& argu = layer->getOutput();
-  if (argu.value) {
-    std::ostringstream os;
-    argu.value->print(os);
-    out->push_back({"value", os.str()});
-  }
-  if (argu.ids) {
-    std::ostringstream os;
-    argu.ids->print(os, argu.ids->getSize());
-    out->push_back({"ids", os.str()});
-  }
-  if (auto startPos = argu.sequenceStartPositions) {
-    std::ostringstream os;
-    startPos->getVector(false)->print(os, startPos->getSize());
-    out->push_back({"sequence pos", os.str()});
-  }
-  if (auto subStartPos = argu.subSequenceStartPositions) {
-    std::ostringstream os;
-    subStartPos->getVector(false)->print(os, subStartPos->getSize());
-    out->push_back({"sub-sequence pos", os.str()});
-  }
-  return Error();
-}
 
 }  // namespace paddle
@@ -128,10 +128,6 @@ public:
   static NeuralNetwork* newNeuralNetwork(const std::string& name = "",
                                          NeuralNetwork* rootNetwork = nullptr);
 
-  inline Error __must_check getLayerOutputValue(
-      const std::string& layerName,
-      std::vector<std::tuple<std::string, std::string>>* out) const;
-
 protected:
   /**
    * The constructor of NeuralNetwork.
......
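With getLayerOutputValue removed from the header as well, external callers migrate to the path the evaluator hunk uses: fetch the layer, take its output Argument, and call getValueString on it. Unlike the old API, this path does not return an Error for an unknown layer name, so the caller must pass a valid name. A hedged migration sketch follows; the include path and the helper name dumpLayerOutput are assumptions for illustration, not part of this commit.

// Hypothetical replacement for callers of the removed
// NeuralNetwork::getLayerOutputValue. Verify the include path in your tree.
#include <string>
#include <unordered_map>

#include "paddle/gserver/gradientmachines/NeuralNetwork.h"

// Returns whatever printable fields the layer's output currently has;
// possible keys are "value", "ids", "sequence pos", "sub-sequence pos".
std::unordered_map<std::string, std::string> dumpLayerOutput(
    const paddle::NeuralNetwork& nn, const std::string& layerName) {
  std::unordered_map<std::string, std::string> out;
  nn.getLayer(layerName)->getOutput().getValueString(&out);
  return out;
}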
@@ -26,28 +26,15 @@ public:
 void PrintLayer::forward(PassType passType) {
   Layer::forward(passType);
   for (size_t i = 0; i != inputLayers_.size(); ++i) {
-    const auto& argu = getInput(i);
+    auto& argu = getInput(i);
     const std::string& name = inputLayers_[i]->getName();
-    if (argu.value) {
-      std::ostringstream os;
-      argu.value->print(os);
-      LOG(INFO) << "layer=" << name << " value matrix:\n" << os.str();
-    }
-    if (argu.ids) {
-      std::ostringstream os;
-      argu.ids->print(os, argu.ids->getSize());
-      LOG(INFO) << "layer=" << name << " ids vector:\n" << os.str();
-    }
-    if (auto startPos = argu.sequenceStartPositions) {
-      std::ostringstream os;
-      startPos->getVector(false)->print(os, startPos->getSize());
-      LOG(INFO) << "layer=" << name << " sequence pos vector:\n" << os.str();
-    }
-    if (auto subStartPos = argu.subSequenceStartPositions) {
-      std::ostringstream os;
-      subStartPos->getVector(false)->print(os, subStartPos->getSize());
-      LOG(INFO) << "layer=" << name << " sub-sequence pos vector:\n"
-                << os.str();
-    }
+    std::unordered_map<std::string, std::string> out;
+    argu.getValueString(&out);
+    for (auto field : {"value", "ids", "sequence pos", "sub-sequence pos"}) {
+      auto it = out.find(field);
+      if (it != out.end()) {
+        LOG(INFO) << "layer=" << name << " " << field << ":\n" << it->second;
+      }
+    }
   }
 }
......
@@ -602,6 +602,32 @@ void Argument::degradeSequence(const Argument& input, bool useGpu) {
   tgtBuf[numSequences] = numSubSequences;
 }
 
+void Argument::getValueString(
+    std::unordered_map<std::string, std::string>* out) const {
+  if (value) {
+    std::ostringstream os;
+    value->print(os);
+    out->insert({"value", os.str()});
+  }
+  if (ids) {
+    std::ostringstream os;
+    ids->print(os, ids->getSize());
+    out->insert({"ids", os.str()});
+  }
+  if (sequenceStartPositions) {
+    std::ostringstream os;
+    sequenceStartPositions->getVector(false)->print(
+        os, sequenceStartPositions->getSize());
+    out->insert({"sequence pos", os.str()});
+  }
+  if (subSequenceStartPositions) {
+    std::ostringstream os;
+    subSequenceStartPositions->getVector(false)->print(
+        os, subSequenceStartPositions->getSize());
+    out->insert({"sub-sequence pos", os.str()});
+  }
+}
+
 void Argument::subArgFrom(const Argument& input,
                           size_t offset,
                           size_t height,
......
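If one wanted to exercise the new helper in isolation, a unit test along the following lines could work. This is only a sketch: it assumes gtest is linked with a test main, that IVector::create(size, useGpu) and zeroMem() keep their usual signatures, and that the include path matches the tree; verify all of that before use.

// Hypothetical test sketch (not part of this commit); adjust the includes to
// the actual tree and confirm the factory signatures before relying on it.
#include <gtest/gtest.h>

#include <unordered_map>

#include "paddle/parameter/Argument.h"

TEST(Argument, getValueStringEmitsOnlyPresentFields) {
  paddle::Argument argu;
  // Assumed factory: IVector::create(size, useGpu). Fill only the ids field.
  argu.ids = paddle::IVector::create(/*size=*/4, /*useGpu=*/false);
  argu.ids->zeroMem();

  std::unordered_map<std::string, std::string> out;
  argu.getValueString(&out);

  // Only the ids field was set, so only that key should appear.
  EXPECT_EQ(1UL, out.count("ids"));
  EXPECT_EQ(0UL, out.count("value"));
  EXPECT_EQ(0UL, out.count("sequence pos"));
  EXPECT_EQ(0UL, out.count("sub-sequence pos"));
}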
@@ -297,6 +297,14 @@ struct Argument {
     sequence has sub-sequence degrades to a sequence.
   */
   void degradeSequence(const Argument& input, bool useGpu);
 
+  /**
+   * @brief getValueString returns the argument's output as strings. There are
+   * several kinds of output; the keys of the output map are 'value', 'ids',
+   * 'sequence pos' and 'sub-sequence pos'.
+   * @param out [out]: the return values.
+   */
+  void getValueString(std::unordered_map<std::string, std::string>* out) const;
 };
 
 }  // namespace paddle