Commit fc47492f authored by liaogang

Fix merge conflict bug and glog

Parent b090ce32
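The static_cast<int> additions throughout this diff appear to address the "glog" part of the commit message: glog's CHECK_EQ compares its two operands inside a template, so checking an int literal against a size_t expression such as vector::size() can raise -Wsign-compare and fail builds that treat warnings as errors. Below is a minimal sketch of that pattern, assuming only that glog is available; the std::vector is a stand-in for the Arguments container used in the real code.

```cpp
#include <glog/logging.h>
#include <vector>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  std::vector<int> inputs = {1, 2, 3};

  // Before: the literal 3 is int, inputs.size() is size_t; inside glog's
  // CHECK_EQ template this mixed comparison can trigger -Wsign-compare.
  // CHECK_EQ(3, inputs.size());

  // After: cast the unsigned size so both operands are int.
  CHECK_EQ(3, static_cast<int>(inputs.size()));
  return 0;
}
```

For the small element counts checked here, casting the size to int keeps the check's meaning while giving both operands the same signed type.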
@@ -12,8 +12,6 @@ endif()
 add_library(paddle_function STATIC ${cpp_files} ${cu_objs})
 add_dependencies(paddle_function ${external_project_dependencies})
-add_library(paddle_test_main STATIC TestMain.cpp)
-add_dependencies(paddle_test_main ${external_project_dependencies})
 if(WITH_GPU)
 if(WITH_TESTING)
......
@@ -85,15 +85,15 @@ public:
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
-    CHECK_EQ(3, inputs.size());
-    CHECK_EQ(1, outputs.size());
-    CHECK_EQ(0, inouts.size());
+    CHECK_EQ(3, static_cast<int>(inputs.size()));
+    CHECK_EQ(1, static_cast<int>(outputs.size()));
+    CHECK_EQ(0, static_cast<int>(inouts.size()));
     CHECK(outputs[0].getData() && inputs[0].getData() && inputs[2].getData());
-    CHECK_EQ(outputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[1].dims_.size(), 2);
-    CHECK_EQ(inputs[2].dims_.size(), 1);
+    CHECK_EQ(static_cast<int>(outputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[1].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[2].dims_.size()), 1);
     /// dim of output = dim of input * context_length
     CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_);
     /// dim of input == dim of weight
@@ -202,15 +202,15 @@ public:
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
-    CHECK_EQ(3, inputs.size());
-    CHECK_EQ(1, outputs.size());
-    CHECK_EQ(0, inouts.size());
+    CHECK_EQ(3, static_cast<int>(inputs.size()));
+    CHECK_EQ(1, static_cast<int>(outputs.size()));
+    CHECK_EQ(0, static_cast<int>(inouts.size()));
     CHECK(outputs[0].getData() && inputs[2].getData());
-    CHECK_EQ(outputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[1].dims_.size(), 2);
-    CHECK_EQ(inputs[2].dims_.size(), 1);
+    CHECK_EQ(static_cast<int>(outputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[1].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[2].dims_.size()), 1);
     /// dim of input == dim of weight
     CHECK_EQ(inputs[0].dims_[1], inputs[1].dims_[1]);
@@ -269,13 +269,13 @@ public:
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
-    CHECK_EQ(2, inputs.size());
-    CHECK_EQ(1, outputs.size());
-    CHECK_EQ(0, inouts.size());
+    CHECK_EQ(2, static_cast<int>(inputs.size()));
+    CHECK_EQ(1, static_cast<int>(outputs.size()));
+    CHECK_EQ(0, static_cast<int>(inouts.size()));
     CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData());
-    CHECK_EQ(outputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[1].dims_.size(), 1);
+    CHECK_EQ(static_cast<int>(outputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[1].dims_.size()), 1);
     CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_);
     /// input and output has the same batch_size
     CHECK_EQ(inputs[0].dims_[0], outputs[0].dims_[0]);
@@ -317,14 +317,14 @@ public:
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
-    CHECK_EQ(2, inputs.size());
-    CHECK_EQ(1, outputs.size());
-    CHECK_EQ(0, inouts.size());
+    CHECK_EQ(2, static_cast<int>(inputs.size()));
+    CHECK_EQ(1, static_cast<int>(outputs.size()));
+    CHECK_EQ(0, static_cast<int>(inouts.size()));
     CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData());
-    CHECK_EQ(outputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[0].dims_.size(), 2);
-    CHECK_EQ(inputs[1].dims_.size(), 1);
+    CHECK_EQ(static_cast<int>(outputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 2);
+    CHECK_EQ(static_cast<int>(inputs[1].dims_.size()), 1);
     CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_);
     auto out_grad_mat = std::make_shared<typename MatrixT<Device>::type>(
......
@@ -46,28 +46,32 @@ bool FuncConfig::get<bool>(const std::string& key) const {
 
 template <>
 FuncConfig& FuncConfig::set<size_t>(const std::string& key, size_t v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].s = v;
   return *this;
 }
 
 template <>
 FuncConfig& FuncConfig::set<real>(const std::string& key, real v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].r = v;
   return *this;
 }
 
 template <>
 FuncConfig& FuncConfig::set<int>(const std::string& key, int v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].i = v;
   return *this;
 }
 
 template <>
 FuncConfig& FuncConfig::set<bool>(const std::string& key, bool v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].b = v;
   return *this;
 }
......
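The FuncConfig changes above keep the same duplicate-key guard while wrapping its message onto a second line; since each set<T>() returns *this, callers can chain configuration values. Below is a self-contained mock of that setter pattern, illustrative only (it is not PaddlePaddle's actual FuncConfig, and the key names are made up), assuming glog is available.

```cpp
#include <glog/logging.h>
#include <map>
#include <string>

struct Config {
  std::map<std::string, int> values;

  Config& set(const std::string& key, int v) {
    // map::count() returns size_t, hence the cast before comparing with 0.
    CHECK_EQ(static_cast<int>(values.count(key)), 0)
        << "Duplicated value: " << key;
    values[key] = v;
    return *this;  // returning *this is what makes chained calls possible
  }
};

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  Config cfg;
  // Chained calls, mirroring how a FuncConfig-style setter is meant to be used.
  cfg.set("context_length", 3).set("context_start", -1);
  // cfg.set("context_length", 5);  // would abort: duplicated key
  return 0;
}
```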
@@ -111,7 +111,8 @@ void ContextProjection::forward() {
   size_t dim = out_->value->getWidth();
   CHECK_EQ(dim, input_dim * config_.context_length());
   size_t batch_size = in_->value->getHeight();
-  CHECK_EQ(forward_.size(), 1) << "Only one forward function here";
+  CHECK_EQ(static_cast<int>(forward_.size()), 1)
+      << "Only one forward function here";
   REGISTER_TIMER_INFO("ContextProjectionForward", getName().c_str());
   bool is_padding = config_.trainable_padding();
@@ -154,7 +155,8 @@ void ContextProjection::backward(const UpdateCallback& callback) {
   CHECK_EQ(dim, input_dim * config_.context_length());
   size_t batch_size = in_->value->getHeight();
   CHECK_EQ(batch_size, out_->value->getHeight());
-  CHECK_EQ(backward_.size(), 1) << "Only one backward function here";
+  CHECK_EQ(static_cast<int>(backward_.size()), 1)
+      << "Only one backward function here";
   REGISTER_TIMER_INFO("ContextProjectionBackward", getName().c_str());
   bool is_padding = config_.trainable_padding();
......
warp-ctc @ bd535c8d
Subproject commit bd535c8d44e03c8ebd2d768e06c8c05fdccd11d2