Commit 3fffcd49 authored by Yu Yang, committed by GitHub

Merge pull request #1755 from reyoung/feature/add_any_in_paddle

Using linb::any/std::any instead of FunctionConfig
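The change in a nutshell: FuncConfig used to keep its values in a tagged union (size_t/real/int/bool) with one explicit get/set specialization per type, so supporting a new value type, such as the std::vector<uint32_t> padding pairs below, meant touching Function.cpp. Storing a type-erased any instead makes get/set ordinary templates. A minimal standalone sketch of the idea, using C++17 std::any (identifiers here are illustrative, not from the commit):

    #include <any>
    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // One map holds heterogeneously typed values; no per-type union field.
      std::map<std::string, std::any> config;
      config["cstart"] = 2;                              // int
      config["scale"] = 1.5f;                            // float
      config["channel"] = std::vector<uint32_t>{1, 2};   // containers work too
      // any_cast re-checks the stored type at run time.
      assert(std::any_cast<int>(config.at("cstart")) == 2);
      assert(std::any_cast<std::vector<uint32_t>>(config.at("channel"))[1] == 2);
      return 0;
    }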
CMakeLists.txt
@@ -64,6 +64,7 @@ include(external/python)    # download, build, install python
 include(external/openblas)  # download, build, install openblas
 include(external/swig)      # download, build, install swig
 include(external/warpctc)   # download, build, install warpctc
+include(external/any)       # download linb::any
 include(package)            # set paddle packages
 include(cpplint)            # set paddle c++ style
......
cmake/external/any.cmake (new file)
+INCLUDE(ExternalProject)
+
+SET(ANY_SOURCE_DIR ${THIRD_PARTY_PATH}/any)
+
+INCLUDE_DIRECTORIES(${ANY_SOURCE_DIR}/src/linb_any)
+
+ExternalProject_Add(
+    linb_any
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    GIT_REPOSITORY      "https://github.com/thelink2012/any.git"
+    GIT_TAG             "8fef1e93710a0edf8d7658999e284a1142c4c020"
+    PREFIX              ${ANY_SOURCE_DIR}
+    UPDATE_COMMAND      ""
+    CONFIGURE_COMMAND   ""
+    BUILD_COMMAND       ""
+    INSTALL_COMMAND     ""
+    TEST_COMMAND        ""
+)
+
+add_definitions(-DANY_IMPL_ANY_CAST_MOVEABLE)
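One note on the last line: the thelink2012/any README documents ANY_IMPL_ANY_CAST_MOVEABLE as opting into the LWG 2509 behavior, where any_cast on an rvalue any moves the contained value out instead of copying it. A minimal sketch under that assumption:

    #include <any.hpp>  // the header fetched by the ExternalProject above
    #include <string>
    #include <utility>

    int main() {
      linb::any a = std::string(1 << 20, 'x');  // a large payload
      // With the macro defined, casting from an rvalue any moves the string
      // out of the container rather than duplicating a megabyte of data.
      std::string s = linb::any_cast<std::string>(std::move(a));
      return s.size() == (1 << 20) ? 0 : 1;
    }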
paddle/function/Function.cpp
@@ -16,66 +16,6 @@ limitations under the License. */
 
 namespace paddle {
 
-template <>
-size_t FuncConfig::get<size_t>(const std::string& key) const {
-  auto it = valueMap_.find(key);
-  CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'";
-  return it->second.s;
-}
-
-template <>
-real FuncConfig::get<real>(const std::string& key) const {
-  auto it = valueMap_.find(key);
-  CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'";
-  return it->second.r;
-}
-
-template <>
-int FuncConfig::get<int>(const std::string& key) const {
-  auto it = valueMap_.find(key);
-  CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'";
-  return it->second.i;
-}
-
-template <>
-bool FuncConfig::get<bool>(const std::string& key) const {
-  auto it = valueMap_.find(key);
-  CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'";
-  return it->second.b;
-}
-
-template <>
-FuncConfig& FuncConfig::set<size_t>(const std::string& key, size_t v) {
-  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
-                                                      << key;
-  valueMap_[key].s = v;
-  return *this;
-}
-
-template <>
-FuncConfig& FuncConfig::set<real>(const std::string& key, real v) {
-  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
-                                                      << key;
-  valueMap_[key].r = v;
-  return *this;
-}
-
-template <>
-FuncConfig& FuncConfig::set<int>(const std::string& key, int v) {
-  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
-                                                      << key;
-  valueMap_[key].i = v;
-  return *this;
-}
-
-template <>
-FuncConfig& FuncConfig::set<bool>(const std::string& key, bool v) {
-  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
-                                                      << key;
-  valueMap_[key].b = v;
-  return *this;
-}
-
 void BufferArgs::addArg(const Matrix& arg,
                         const TensorShape& shape,
                         ArgType argType) {
......
paddle/function/Function.h
@@ -18,32 +18,49 @@ limitations under the License. */
 
 #include <vector>
 #include "BufferArg.h"
 #include "paddle/math/Matrix.h"
+#include "paddle/utils/Any.h"
 #include "paddle/utils/ClassRegistrar.h"
+#include "paddle/utils/Error.h"
 
 namespace paddle {
 
 /**
  * Function Configuration.
  * The argument type of Function::init.
- * Follow-up will consider moving this data structure to Proto inside.
  */
 class FuncConfig {
 public:
-  union value {
-    size_t s;
-    real r;
-    int i;
-    bool b;
-  };
-
   template <typename T>
-  T get(const std::string& key) const;
+  T get(const std::string& key, Error* err = nullptr) const {
+    try {
+      return any_cast<T>(valueMap_.at(key));
+    } catch (std::exception& e) {  // could be a cast or an out-of-range exception.
+      if (err) {
+        *err = Error(e.what());
+      } else {
+        LOG(FATAL) << "Cannot get key " << key << " with error " << e.what();
+      }
+      return T();
+    }
+  }
 
   template <typename T>
-  FuncConfig& set(const std::string& key, T v);
+  FuncConfig& set(const std::string& key, T v, Error* err = nullptr) {
+    auto it = valueMap_.find(key);
+    if (it != valueMap_.end()) {  // already contains key.
+      if (err) {
+        *err = Error("Key %s is already set in FuncConfig", key.c_str());
+      } else {
+        LOG(FATAL) << "Key " << key << " is already set in FuncConfig.";
+      }
+      return *this;
+    }
+    valueMap_[key] = any(v);
+    return *this;
+  }
 
 protected:
-  std::map<std::string, value> valueMap_;
+  mutable std::unordered_map<std::string, any> valueMap_;
 };
 
 /**
......
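The rewritten accessors keep the old fail-fast behavior (LOG(FATAL) on a missing key, a type mismatch, or a duplicate set), while the new Error* out-parameter gives callers a recoverable path. A hypothetical caller, assuming paddle::Error reports success via an isOK() predicate:

    FuncConfig cfg;
    cfg.set("channel", std::vector<uint32_t>{0, 0}).set("scale", 1.5f);

    // Default mode: any failure aborts, matching the old specializations.
    auto channel = cfg.get<std::vector<uint32_t>>("channel");

    // Recoverable mode: the bad_any_cast below is converted into err.
    Error err;
    auto scale = cfg.get<double>("scale", &err);  // stored float, asked for double
    if (!err.isOK()) {
      // handle or report the mismatch instead of crashing
    }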
paddle/function/PadOp.cpp
@@ -25,9 +25,9 @@ void Pad<DEVICE_TYPE_CPU>(real* outputs,
                           const int inH,
                           const int inW,
                           const PadConf& pad) {
-  int cstart = pad.channelStart, cend = pad.channelEnd;
-  int hstart = pad.heightStart, hend = pad.heightEnd;
-  int wstart = pad.widthStart, wend = pad.widthEnd;
+  int cstart = pad.channel[0], cend = pad.channel[1];
+  int hstart = pad.height[0], hend = pad.height[1];
+  int wstart = pad.width[0], wend = pad.width[1];
   int outC = inC + cstart + cend;
   int outH = inH + hstart + hend;
   int outW = inW + wstart + wend;
@@ -51,9 +51,9 @@ void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
                               const int inH,
                               const int inW,
                               const PadConf& pad) {
-  int cstart = pad.channelStart, cend = pad.channelEnd;
-  int hstart = pad.heightStart, hend = pad.heightEnd;
-  int wstart = pad.widthStart, wend = pad.widthEnd;
+  int cstart = pad.channel[0], cend = pad.channel[1];
+  int hstart = pad.height[0], hend = pad.height[1];
+  int wstart = pad.width[0], wend = pad.width[1];
   int outC = inC + cstart + cend;
   int outH = inH + hstart + hend;
   int outW = inW + wstart + wend;
@@ -71,6 +71,12 @@ void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
   }
 }
 
+static inline PadConf castToPadConf(const FuncConfig& conf) {
+  return {conf.get<std::vector<uint32_t>>("channel"),
+          conf.get<std::vector<uint32_t>>("height"),
+          conf.get<std::vector<uint32_t>>("width")};
+}
+
 /**
  * \brief Padding zeros to input according to the specified dimension.
  * The struct pad_ contains the padding size in each dimension.
@@ -127,14 +133,7 @@ void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
 template <DeviceType Device>
 class PadFunc : public FunctionBase {
 public:
-  void init(const FuncConfig& config) override {
-    pad_.channelStart = config.get<int>("cstart");
-    pad_.channelEnd = config.get<int>("cend");
-    pad_.heightStart = config.get<int>("hstart");
-    pad_.heightEnd = config.get<int>("hend");
-    pad_.widthStart = config.get<int>("wstart");
-    pad_.widthEnd = config.get<int>("wend");
-  }
+  void init(const FuncConfig& config) override { pad_ = castToPadConf(config); }
 
   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
     CHECK_EQ(1UL, inputs.size());
@@ -175,14 +174,7 @@ private:
 template <DeviceType Device>
 class PadGradFunc : public FunctionBase {
 public:
-  void init(const FuncConfig& config) override {
-    pad_.channelStart = config.get<int>("cstart");
-    pad_.channelEnd = config.get<int>("cend");
-    pad_.heightStart = config.get<int>("hstart");
-    pad_.heightEnd = config.get<int>("hend");
-    pad_.widthStart = config.get<int>("wstart");
-    pad_.widthEnd = config.get<int>("wend");
-  }
+  void init(const FuncConfig& config) override { pad_ = castToPadConf(config); }
 
   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
     CHECK_EQ(1UL, inputs.size());
......
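Taken together with PadOp.h below, a caller now sets one {before, after} vector per dimension and castToPadConf unpacks the three keys into a PadConf. An illustrative configuration (values made up):

    FuncConfig conf = FuncConfig()
                          .set("channel", std::vector<uint32_t>{0, 0})
                          .set("height", std::vector<uint32_t>{1, 1})
                          .set("width", std::vector<uint32_t>{2, 2});
    PadConf pad = castToPadConf(conf);
    // pad.height[0] rows are added above, pad.width[1] columns to the right,
    // so the padded width is inW + pad.width[0] + pad.width[1].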
paddle/function/PadOp.h
@@ -19,18 +19,12 @@ limitations under the License. */
 
 namespace paddle {
 
 struct PadConf {
-  /// how many values to add before the data along channel dimension.
-  int channelStart;
-  /// how many values to add after the data along channel dimension.
-  int channelEnd;
-  /// how many values to add before the data along height dimension.
-  int heightStart;
-  /// how many values to add after the data along height dimension.
-  int heightEnd;
-  /// how many values to add before the data along width dimension.
-  int widthStart;
-  /// how many values to add after the data along width dimension.
-  int widthEnd;
+  /// how many values to add before/after the data along channel dimension.
+  std::vector<uint32_t> channel;
+  /// how many values to add before/after the data along height dimension.
+  std::vector<uint32_t> height;
+  /// how many values to add before/after the data along width dimension.
+  std::vector<uint32_t> width;
 };
 
 /**
......
paddle/gserver/layers/PadLayer.cpp
@@ -36,12 +36,9 @@ bool PadLayer::init(const LayerMap& layerMap,
   CHECK_EQ(2, pad_conf.pad_c_size());
   CHECK_EQ(2, pad_conf.pad_h_size());
   CHECK_EQ(2, pad_conf.pad_w_size());
-  padc_.push_back(pad_conf.pad_c(0));
-  padc_.push_back(pad_conf.pad_c(1));
-  padh_.push_back(pad_conf.pad_h(0));
-  padh_.push_back(pad_conf.pad_h(1));
-  padw_.push_back(pad_conf.pad_w(0));
-  padw_.push_back(pad_conf.pad_w(1));
+  padc_ = {pad_conf.pad_c(0), pad_conf.pad_c(1)};
+  padh_ = {pad_conf.pad_h(0), pad_conf.pad_h(1)};
+  padw_ = {pad_conf.pad_w(0), pad_conf.pad_w(1)};
 
   outDims_ = TensorShape(4);
   setOutDims(0);
@@ -49,21 +46,15 @@ bool PadLayer::init(const LayerMap& layerMap,
   createFunction(forward_,
                  "Pad",
                  FuncConfig()
-                     .set("cstart", padc_[0])
-                     .set("cend", padc_[1])
-                     .set("hstart", padh_[0])
-                     .set("hend", padh_[1])
-                     .set("wstart", padw_[0])
-                     .set("wend", padw_[1]));
+                     .set("channel", padc_)
+                     .set("height", padh_)
+                     .set("width", padw_));
   createFunction(backward_,
                  "PadGrad",
                  FuncConfig()
-                     .set("cstart", padc_[0])
-                     .set("cend", padc_[1])
-                     .set("hstart", padh_[0])
-                     .set("hend", padh_[1])
-                     .set("wstart", padw_[0])
-                     .set("wend", padw_[1]));
+                     .set("channel", padc_)
+                     .set("height", padh_)
+                     .set("width", padw_));
 
   return true;
 }
......
paddle/gserver/layers/PadLayer.h
@@ -38,9 +38,9 @@ protected:
   void setOutDims(const size_t batchSize);
   void setTensorDim(const size_t batchSize);
 
-  std::vector<int> padc_;
-  std::vector<int> padh_;
-  std::vector<int> padw_;
+  std::vector<uint32_t> padc_;
+  std::vector<uint32_t> padh_;
+  std::vector<uint32_t> padw_;
 
   TensorShape inDims_;
   TensorShape outDims_;
 };
......
paddle/utils/Any.h (new file)
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#if __cplusplus > 201402L
+#include <any>
+
+namespace paddle {
+// using std::any for C++17
+using std::any;
+using std::any_cast;
+using std::bad_any_cast;
+}  // namespace paddle
+
+#else
+#include <any.hpp>
+
+namespace paddle {
+// using linb::any for C++11
+using linb::any;
+using linb::any_cast;
+using linb::bad_any_cast;
+}  // namespace paddle
+#endif
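With this shim, call sites name only paddle::any and compile unchanged as C++11 (via linb::any) or C++17 (via std::any). A minimal usage sketch:

    #include "paddle/utils/Any.h"

    #include <iostream>
    #include <string>

    int main() {
      paddle::any v = std::string("pad");
      try {
        std::cout << paddle::any_cast<std::string>(v) << "\n";  // ok
        paddle::any_cast<int>(v);                               // wrong type
      } catch (const paddle::bad_any_cast& e) {
        std::cerr << e.what() << "\n";  // same error type on both branches
      }
      return 0;
    }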