Commit 502e7259 authored by zchen0211

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop

@@ -189,7 +189,7 @@ OpDesc {
inputs = {0}             // the index of x in vars of BlockDesc above
outputs = {5, 3}         // indices of act and hidden_out in vars of BlockDesc above
attrs {
-  "memories" : {1}       // the index of h
+  "states" : {1}         // the index of h
  "step_net" : <above step net>
}
};
...
@@ -28,23 +28,37 @@ add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER}
add_dependencies(paddle_capi paddle_proto)
-# combine all paddle static libraries together, into libpaddle_capi_whole.a
-# user should use PaddleCAPI as -lpaddle_capi_whole
-set(PADDLE_CAPI_INFER_LIBS
-    paddle_utils
-    paddle_parameter
-    paddle_math
-    paddle_cuda
-    paddle_function
-    paddle_gserver
-    paddle_proto)
+# TODO: paddle_capi_whole will be removed.
+if(MOBILE_INFERENCE)
+  set(PADDLE_CAPI_INFER_LIBS
+      paddle_utils
+      paddle_parameter
+      paddle_math
+      paddle_cuda
+      paddle_function
+      paddle_gserver
+      paddle_proto)
+else()
+  set(PADDLE_CAPI_INFER_LIBS
+      paddle_utils
+      paddle_parameter
+      paddle_math
+      paddle_cuda
+      paddle_function
+      paddle_gserver
+      paddle_proto
+      paddle_pserver
+      paddle_network)
+endif()
cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
-# No shared library for iOS
+# Link the static library for inference
+cc_library(paddle_capi_engine DEPS paddle_capi paddle_utils paddle_parameter paddle_math paddle_cuda paddle_proto)
+cc_library(paddle_capi_layers DEPS paddle_function paddle_gserver)
+
+# Link the shared library for inference
if(NOT IOS)
-  set(LINK_FLAGS " -Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/export.sym -Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/export.map")
+  set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_capi.map")
+  # TODO: merge mkl into paddle_capi_shared
  add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
  set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
  target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
@@ -53,9 +67,10 @@ endif()
# install library & headers.
install(FILES ${CAPI_HEADERS} DESTINATION include/paddle)
+install(FILES paddle_capi.map DESTINATION include/paddle)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle)
if(ANDROID)
-  install(TARGETS paddle_capi_whole paddle_capi_shared
+  install(TARGETS paddle_capi_whole paddle_capi_engine paddle_capi_layers paddle_capi_shared
          ARCHIVE DESTINATION lib/${ANDROID_ABI}
          LIBRARY DESTINATION lib/${ANDROID_ABI})
  execute_process(
@@ -80,7 +95,7 @@ if(ANDROID)
  )"
  )
else(ANDROID)
-  install(TARGETS paddle_capi_whole ARCHIVE DESTINATION lib)
+  install(TARGETS paddle_capi_whole paddle_capi_engine paddle_capi_layers ARCHIVE DESTINATION lib)
  if(NOT IOS)
    install(TARGETS paddle_capi_shared DESTINATION lib)
  endif()
...
@@ -21,6 +21,7 @@
#include "paddle/framework/block_desc.h"
#include "paddle/framework/op_registry.h"
+#include "paddle/operators/dynamic_recurrent_op.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
@@ -220,8 +221,7 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
  // process recurrent gradient op as a special operator.
  if (forwardOp.Type() == "recurrent") {
    // NOTE clean up cycle call somewhere (RNN's stepnet constains itself),
-    // or
-    // this will result in infinite loop.
+    // or this will result in infinite loop.
    const auto& rnnop =
        *static_cast<const operators::RecurrentOp*>(&forwardOp);
    auto rnn_grad_op =
@@ -231,6 +231,18 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
    // create stepnet's gradient op
    rnn_grad_op->set_stepnet(
        BackwardRecursive(stepnet_op, no_grad_names, grad_to_var, uniq_id));
+  } else if (forwardOp.Type() == "dynamic_recurrent") {
+    // NOTE clean up cycle call somewhere (RNN's stepnet constains itself),
+    // or this will result in infinite loop.
+    const auto& rnnop =
+        *static_cast<const operators::DynamicRecurrentOp*>(&forwardOp);
+    auto rnn_grad_op =
+        static_cast<operators::DynamicRecurrentGradientOp*>(grad_op.get());
+    const auto& stepnet_op =
+        *static_cast<const OperatorBase*>(&rnnop.rnn.GetStepUnit());
+    // create stepnet's gradient op
+    rnn_grad_op->rnn.SetStepUnit(
+        BackwardRecursive(stepnet_op, no_grad_names, grad_to_var, uniq_id));
  }
  if (net->ops_.empty()) {  // Current no aux op is added to network
...
@@ -26,6 +26,8 @@ inline DataType ToDataType(std::type_index type) {
    return DataType::FP64;
  } else if (typeid(int).hash_code() == type.hash_code()) {
    return DataType::INT32;
+  } else if (typeid(int64_t).hash_code() == type.hash_code()) {
+    return DataType::INT64;
  } else {
    PADDLE_THROW("Not supported");
  }
...
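To make the new int64_t branch above concrete, here is a minimal standalone C++ sketch (not Paddle code) of the same typeid-dispatch pattern; the enum values and the function name ToDataTypeSketch are illustrative only, and the unsupported case returns UNKNOWN instead of raising PADDLE_THROW as the real code does.

#include <cassert>
#include <cstdint>
#include <typeindex>
#include <typeinfo>

enum class DataType { FP32, FP64, INT32, INT64, UNKNOWN };

// Map a runtime type to the framework's data-type enum, mirroring ToDataType().
DataType ToDataTypeSketch(std::type_index type) {
  if (type == std::type_index(typeid(float))) return DataType::FP32;
  if (type == std::type_index(typeid(double))) return DataType::FP64;
  if (type == std::type_index(typeid(int))) return DataType::INT32;
  if (type == std::type_index(typeid(int64_t))) return DataType::INT64;  // branch added by this commit
  return DataType::UNKNOWN;
}

int main() {
  assert(ToDataTypeSketch(typeid(int64_t)) == DataType::INT64);
  assert(ToDataTypeSketch(typeid(bool)) == DataType::UNKNOWN);
  return 0;
}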
@@ -84,8 +84,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
      op->Run(local_scope, *device);
    }
-    // TODO(tonyyang-svail):
-    //  - Destroy local_scope
+    scope->DeleteScope(&local_scope);
  }
}
}  // namespace framework
...
@@ -21,28 +21,28 @@ limitations under the License. */
namespace paddle {
namespace framework {

-template <typename T>
-void SetFeedVariable(const LoDTensor& input, const std::string& var_name,
-                     size_t index) {
+void SetFeedVariable(Scope* scope, const LoDTensor& input,
+                     const std::string& var_name, size_t index) {
  // If var_name Variable is not found in GlobalScope, a new variable will
  // be created.
  VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index;
-  Variable* g_feed_value = GetGlobalScope().Var(var_name);
+  Variable* g_feed_value = scope->Var(var_name);
  auto& feed_inputs =
      *(g_feed_value->GetMutable<std::vector<paddle::framework::LoDTensor>>());
  if (index >= feed_inputs.size()) {
    feed_inputs.resize(index + 1);
  }
  // shared data with input tensor
-  feed_inputs[index].ShareDataWith<T>(input);
+  feed_inputs[index].ShareDataWith(input);
  // set lod
  feed_inputs[index].set_lod(input.lod());
}

-LoDTensor& GetFetchVariable(const std::string& var_name, size_t index) {
+LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
+                            size_t index) {
  // Since we want to fetch LodTensor from a variable, the variable must
  // be created alreadly.
-  Variable* g_fetch_value = GetGlobalScope().FindVar(var_name);
+  Variable* g_fetch_value = scope.FindVar(var_name);
  PADDLE_ENFORCE(g_fetch_value->IsType<FeedFetchList>(),
                 "Only %s can be invoked by GetFetchVariable",
                 typeid(FeedFetchList).name());
...
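For reference, a hedged usage sketch of the new scope-passing signatures above: the caller now owns the Scope and passes it explicitly instead of relying on a global scope. This assumes the feed_fetch_method header path and the Scope/LoDTensor types from this commit; the names FeedAndFetchExample, ws, "feed" and "fetch" are illustrative, it only compiles inside the Paddle tree, and an executor run over ws is still needed before the fetch variable exists.

#include "paddle/framework/feed_fetch_method.h"
#include "paddle/framework/scope.h"

void FeedAndFetchExample(const paddle::framework::LoDTensor& input) {
  paddle::framework::Scope ws;  // caller-owned workspace instead of GetGlobalScope()
  // Write slot 0 of the "feed" variable inside ws.
  paddle::framework::SetFeedVariable(&ws, input, "feed", 0);
  // ... run an executor over `ws` here so that "fetch" gets populated ...
  // Read slot 0 of the "fetch" variable back from the same scope.
  auto& out = paddle::framework::GetFetchVariable(ws, "fetch", 0);
  (void)out;
}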
@@ -25,31 +25,50 @@ LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) {
  for (size_t i = level_begin; i < level_end; i++) {
    new_lod.emplace_back(in.at(i));
  }
+  // transform the lowest level to absolute offset.
+  LoD abs_offset_lod = ToAbsOffset(in);
+  new_lod.back() = abs_offset_lod[level_end - 1];
  return new_lod;
}

LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin,
                 size_t elem_end) {
-  // slice the lod.
-  LoD new_lod;
-  new_lod.reserve(in.size() - level);
-  auto start = in.at(level)[elem_begin];
-  auto end = in.at(level)[elem_end];
-  for (auto it = in.begin() + level; it != in.end(); it++) {
-    auto it_begin = std::find(it->begin(), it->end(), start);
-    auto it_end = std::find(it_begin, it->end(), end);
-    PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
-    PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
-    new_lod.emplace_back(it_begin, it_end + 1);
-    // reset offset if tensor is copyed and sliced.
-    std::transform(new_lod.back().begin(), new_lod.back().end(),
-                   new_lod.back().begin(),
-                   [start](int v) { return v - start; });
-    PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LoD");
-  }
-  PADDLE_ENFORCE_LE(new_lod.size(), in.size());
-  return new_lod;
+  PADDLE_ENFORCE_LT(level, in.size());
+  PADDLE_ENFORCE_LT(elem_end, in[level].size());
+
+  LoD res;
+  res.resize(in.size() - level);
+  // copy the first level
+  res[0].assign(in[level].begin() + elem_begin,
+                in[level].begin() + elem_end + 1);
+  for (size_t lvl = 1; lvl < res.size(); lvl++) {
+    const auto& in_level = in[level + lvl];
+    const auto& above_level = res[lvl - 1];
+    auto& out_level = res[lvl];
+    out_level.assign(in_level.begin() + above_level.front(),
+                     in_level.begin() + above_level.back() + 1);
+  }
+  for (size_t lvl = 0; lvl < res.size(); lvl++) {
+    // to make the first offset equals 0, all the elements minus the first
+    // element
+    size_t front = res[lvl].front();
+    for (auto& ele : res[lvl]) {
+      ele -= front;
+    }
+  }
+  return res;
+}
+
+LoD ToAbsOffset(const LoD& in) {
+  // the lowest level stores relative offsets
+  if (in.empty() || in.size() == 1) return in;
+  LoD result = in;
+  for (int level = result.size() - 2; level >= 0; level--) {
+    for (auto& ele : result[level]) {
+      ele = result[level + 1][ele];
+    }
+  }
+  return result;
}

bool operator==(const LoD& a, const LoD& b) {
@@ -75,17 +94,7 @@ bool operator==(const LoD& a, const LoD& b) {
size_t LoDTensor::NumElements(size_t level, size_t idx) const {
  PADDLE_ENFORCE_LT(level, NumLevels());
  PADDLE_ENFORCE_LT(idx, NumElements(level));
-  // the last level of LoD, just return number of records in Tensor
-  if (level == NumLevels() - 1) {
-    return lod_[level][idx + 1] - lod_[level][idx];
-  }
-  // high level of LoD, and there is another lower level, return number of
-  // lower-level elements
-  auto tmp = SliceInLevel(lod_, level, idx, idx + 1);
-  PADDLE_ENFORCE_GE(tmp.size(), 2);
-  // there is a 0 as a placeholder stored in LoD, so the number of elements
-  // equals lod.size() - 1
-  return tmp[1].size() - 1;
+  return lod_[level][idx + 1] - lod_[level][idx];
}

void LoDTensor::ShrinkLevels(size_t level_begin, size_t level_end) {
...
@@ -39,23 +39,36 @@ using Vector = thrust::host_vector<
#endif

/*
- * 3-level LoD stores
+ * LoD is short for Level of Details.
 *
- * 0 10 20
- * 0 5 10 15 20
- * 0 2 5 7 10 12 15 20
- *
- * - in a level, each element indicates offset in the underlying Tensor
+ * - in a level, each element indicates relative offset of the lower level
 * - the first element should be 0 and that indicates that this sequence start
 * from 0
 * - each sequence's begin and end(no-inclusive) is level[id, id+1]
+ *
+ * For example:
+ *    3-level LoD stores
+ *
+ *    0 2 3
+ *    0 2 4 7
+ *    0 2 5 7 10 12 15 20
 */
using LoD = std::vector<Vector<size_t>>;

+/*
+ * Slice levels from a LoD.
+ * NOTE the lowest level should always be the absolute offsets of the underlying
+ * tensor instances. So if higher layers are sliced without the lowest level,
+ * the lower level of the sliced LoD will be transformed to the absolute offset.
+ */
LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end);

LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin,
                 size_t elem_end);
+
+/*
+ * Transform an LoD from relative offsets to absolute offsets.
+ */
+LoD ToAbsOffset(const LoD& in);

bool operator==(const LoD& a, const LoD& b);
...
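The relative-offset convention documented above can be traced with a small standalone sketch; it uses plain std::vector rather than the Paddle LoD alias, re-implements the ToAbsOffset() logic added in this commit under the name ToAbsOffsetSketch, and runs it on the 3-level example from the comment.

#include <cassert>
#include <cstddef>
#include <vector>

using LoD = std::vector<std::vector<std::size_t>>;

// Same algorithm as the new ToAbsOffset(): every level above the lowest one is
// an index into the level directly below it, so convert from the bottom up.
LoD ToAbsOffsetSketch(const LoD& in) {
  if (in.size() <= 1) return in;
  LoD result = in;
  for (int level = static_cast<int>(result.size()) - 2; level >= 0; level--) {
    for (auto& ele : result[level]) {
      ele = result[level + 1][ele];
    }
  }
  return result;
}

int main() {
  // The 3-level example above: the top level "0 2 3" means sequence 0 covers
  // elements [0, 2) of the middle level and sequence 1 covers [2, 3).
  LoD relative = {{0, 2, 3}, {0, 2, 4, 7}, {0, 2, 5, 7, 10, 12, 15, 20}};
  LoD absolute = ToAbsOffsetSketch(relative);
  // After the transform every level holds offsets into the underlying tensor.
  assert((absolute[1] == std::vector<std::size_t>{0, 5, 10, 20}));
  assert((absolute[0] == std::vector<std::size_t>{0, 10, 20}));
  return 0;
}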
@@ -30,8 +30,8 @@ class LoDTensorTester : public ::testing::Test {
    // 0 5 10 15 20
    // 0 2 5 7 10 12 15 20
    LoD lod;
-    lod.push_back(std::vector<size_t>{0, 10, 20});
-    lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
+    lod.push_back(std::vector<size_t>{0, 2, 3});
+    lod.push_back(std::vector<size_t>{0, 2, 5, 8});
    lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});

    ASSERT_EQ(lod.size(), 3UL);
@@ -52,14 +52,14 @@ TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); }

TEST_F(LoDTensorTester, NumElements) {
  ASSERT_EQ(lod_tensor_.NumElements(0), 2UL);
-  ASSERT_EQ(lod_tensor_.NumElements(1), 4UL);
+  ASSERT_EQ(lod_tensor_.NumElements(1), 3UL);
  ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
}

TEST_F(LoDTensorTester, NumElements2) {
  ASSERT_EQ(lod_tensor_.NumElements(0, 0), 2UL);
-  ASSERT_EQ(lod_tensor_.NumElements(0, 1), 2UL);
-  ASSERT_EQ(lod_tensor_.NumElements(1, 1), 2UL);
+  ASSERT_EQ(lod_tensor_.NumElements(0, 1), 1UL);
+  ASSERT_EQ(lod_tensor_.NumElements(1, 1), 3UL);
}

TEST_F(LoDTensorTester, ShrinkLevels) {
@@ -68,17 +68,16 @@ TEST_F(LoDTensorTester, ShrinkLevels) {
    LoDTensor new_lod_tensor = lod_tensor_;
    new_lod_tensor.ShrinkLevels(level, level + 1);
    ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
  }
  // shrink 2 level
  for (size_t level = 0; level < 2UL; ++level) {
    LoDTensor new_lod_tensor = lod_tensor_;
    new_lod_tensor.ShrinkLevels(level, level + 2);
+    // the lowest level's last element should be the tensor's batch_size.
+    ASSERT_EQ(new_lod_tensor.lod().back().back(),
+              lod_tensor_.lod().back().back());
    ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
-    ASSERT_EQ(new_lod_tensor.NumElements(1),
-              lod_tensor_.NumElements(level + 1));
    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
  }
}
@@ -86,19 +85,19 @@ TEST_F(LoDTensorTester, ShrinkLevels) {
TEST_F(LoDTensorTester, ShrinkInLevel) {
  size_t level = 0;
  LoDTensor new_lod_tensor = lod_tensor_;
-  new_lod_tensor.ShrinkInLevel(level, 0, 2);
+  new_lod_tensor.ShrinkInLevel(level, 0, 1);
  EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
-  EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(0), 1UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(1), 2UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(2), 5UL);
  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());

  level = 1;
  new_lod_tensor = lod_tensor_;
-  new_lod_tensor.ShrinkInLevel(level, 0, 2);
+  new_lod_tensor.ShrinkInLevel(level, 1, 2);
  ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
+  ASSERT_EQ(new_lod_tensor.NumElements(0), 1UL);
+  ASSERT_EQ(new_lod_tensor.NumElements(1), 3UL);
  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
...
@@ -65,12 +65,11 @@ void Scope::DropKids() {
  kids_.clear();
}

-framework::Scope& GetGlobalScope() {
-  static framework::Scope* g_scope = nullptr;
-  if (g_scope == nullptr) {
-    g_scope = new framework::Scope();
-  }
-  return *g_scope;
+void Scope::DeleteScope(Scope* scope) {
+  auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
+  PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope);
+  this->kids_.erase(it);
+  delete scope;
}

}  // namespace framework
...
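A standalone sketch (not the Paddle Scope class) of the ownership pattern behind DeleteScope(): the parent keeps raw pointers to its kids, so deleting a kid must unlink it from the parent before freeing it. The class and method names here are illustrative only.

#include <algorithm>
#include <cassert>
#include <vector>

class Node {
 public:
  ~Node() {
    for (Node* kid : kids_) delete kid;  // parent owns its children
  }
  Node* NewKid() {
    kids_.push_back(new Node);
    return kids_.back();
  }
  void DeleteKid(Node* kid) {
    auto it = std::find(kids_.begin(), kids_.end(), kid);
    assert(it != kids_.end() && "Cannot find the kid to delete");
    kids_.erase(it);  // unlink first so the destructor does not double-free
    delete kid;
  }
  std::size_t NumKids() const { return kids_.size(); }

 private:
  std::vector<Node*> kids_;
};

int main() {
  Node root;
  Node* local = root.NewKid();
  // ... do work inside the child node/scope ...
  root.DeleteKid(local);  // mirrors scope->DeleteScope(&local_scope) in Executor::Run
  assert(root.NumKids() == 0);
  return 0;
}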
@@ -59,6 +59,8 @@ class Scope {
  /// Find the scope or an ancestor scope that contains the given variable.
  const Scope* FindScope(const Variable* var) const;

+  void DeleteScope(Scope* scope);

  /// Drop all kids scopes belonged to this scope.
  void DropKids();
@@ -72,8 +74,5 @@ class Scope {
  DISABLE_COPY_AND_ASSIGN(Scope);
};

-framework::Scope& GetGlobalScope();

}  // namespace framework
}  // namespace paddle
@@ -60,6 +60,10 @@ class Tensor {
  template <typename T>
  inline T* mutable_data(platform::Place place);

+  inline void* mutable_data(platform::Place place, std::type_index type);
+
+  inline void* mutable_data(platform::Place place);

  /**
   * @brief   Return a pointer to mutable memory block.
   *
@@ -81,7 +85,6 @@ class Tensor {
  inline Tensor& Resize(const DDim& dims);

  /*! The internal of two tensors share the same memory block. */
-  template <typename T>
  inline Tensor& ShareDataWith(const Tensor& src);

  /**
@@ -96,26 +99,9 @@ class Tensor {
  // TODO(qijun): https://github.com/PaddlePaddle/Paddle/issues/4647
  // Remove `CopyFrom` and `CopyFromVector` from Tensor interface
  // and make them global functions
-  template <typename T>
  inline void CopyFrom(const Tensor& src, const platform::Place& dst_place,
                       const platform::DeviceContext& ctx);
-
-  // FIXME(yuyang18): CopyFrom should without template T, use the replace
-  // `CopyFrom` with `CopyFromTensor`
-  inline void CopyFromTensor(const Tensor& src,
-                             const platform::Place& dst_place,
-                             const platform::DeviceContext& ctx) {
-    // NOLINTNEXTLINES_8 cpplint.py will recognize below lines as functions.
-    // That is a bug of cpplint.py. Just ignore lint these lines.
-    if (src.type() == std::type_index(typeid(double))) {
-      CopyFrom<double>(src, dst_place, ctx);
-    } else if (src.type() == std::type_index(typeid(float))) {
-      CopyFrom<float>(src, dst_place, ctx);
-    } else if (src.type() == std::type_index(typeid(int))) {
-      CopyFrom<int>(src, dst_place, ctx);
-    }
-  }

  /**
   * @brief   Copy the content of an external vector to a tensor.
   *
@@ -135,7 +121,6 @@ class Tensor {
   * @param[in] begin_idx   The begin index of the slice.
   * @param[in] end_idx     The end index of the slice.
   */
-  template <typename T>
  inline Tensor Slice(const int& begin_idx, const int& end_idx) const;

  platform::Place place() const {
@@ -146,7 +131,6 @@ class Tensor {
  std::type_index type() const { return holder_->type(); }

 private:
-  template <typename T>
  inline void check_memory_size() const;

 private:
@@ -155,20 +139,22 @@ class Tensor {
   * parameter of Variable.
   */
  struct Placeholder {
-    virtual ~Placeholder() {}
+    virtual ~Placeholder() = default;
    virtual void* ptr() const = 0;
    virtual size_t size() const = 0;
    virtual std::type_index type() const = 0;
    virtual platform::Place place() const = 0;
+    virtual void set_type(std::type_index type) = 0;
  };

-  template <typename T, typename Place>
+  template <typename Place>
  struct PlaceholderImpl : public Placeholder {
-    PlaceholderImpl(Place place, size_t size)
-        : ptr_(static_cast<T*>(memory::Alloc(place, size)),
-               memory::PODDeleter<T, Place>(place)),
+    PlaceholderImpl(Place place, size_t size, std::type_index type)
+        : ptr_(static_cast<uint8_t*>(memory::Alloc(place, size)),
+               memory::PODDeleter<uint8_t, Place>(place)),
          place_(place),
-          size_(size) {
+          size_(size),
+          type_(type) {
      PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.",
                              (is_cpu_place(place_) ? "CPU" : "GPU"));
    }
@@ -176,16 +162,20 @@ class Tensor {
    virtual size_t size() const { return size_; }
    virtual platform::Place place() const { return place_; }
    virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
-    virtual std::type_index type() const { return std::type_index(typeid(T)); }
+    virtual std::type_index type() const { return type_; }
+    virtual void set_type(std::type_index type) { type_ = type; }

    /*! the pointer of memory block. */
-    std::unique_ptr<T, memory::PODDeleter<T, Place>> ptr_;
+    std::unique_ptr<uint8_t, memory::PODDeleter<uint8_t, Place>> ptr_;

    /*! the place of memory block. */
    platform::Place place_;

    /*! the size of memory block. */
    size_t size_;

+    /* the current type of memory */
+    std::type_index type_;
  };

  /*! holds the memory block if allocated. */
...
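A standalone sketch (not the Paddle Tensor) of the type-erasure change above: the buffer records a std::type_index at allocation time, so typed accessors can verify the element type at runtime and the holder no longer needs a template parameter per element type. The Buffer class and its methods are illustrative only.

#include <cassert>
#include <cstdint>
#include <typeindex>
#include <typeinfo>
#include <vector>

class Buffer {
 public:
  // Allocate `count` elements of type T and remember the runtime type.
  template <typename T>
  T* mutable_data(std::size_t count) {
    bytes_.resize(count * sizeof(T));
    type_ = std::type_index(typeid(T));
    return reinterpret_cast<T*>(bytes_.data());
  }
  // Typed read access checks against the recorded type instead of a template tag.
  template <typename T>
  const T* data() const {
    assert(type_ == std::type_index(typeid(T)) && "Buffer holds a different type");
    return reinterpret_cast<const T*>(bytes_.data());
  }
  std::type_index type() const { return type_; }

 private:
  std::vector<uint8_t> bytes_;          // untyped storage, like the uint8_t holder above
  std::type_index type_{typeid(void)};  // runtime element type, like Placeholder::type_
};

int main() {
  Buffer buf;
  float* p = buf.mutable_data<float>(4);
  p[0] = 1.5f;
  assert(buf.type() == std::type_index(typeid(float)));
  assert(buf.data<float>()[0] == 1.5f);
  return 0;
}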
...@@ -106,8 +106,8 @@ void TensorArray::Write(size_t index, const LoDTensor& value) { ...@@ -106,8 +106,8 @@ void TensorArray::Write(size_t index, const LoDTensor& value) {
values_[index].Resize(value.dims()); values_[index].Resize(value.dims());
values_[index].mutable_data<value_type>(platform::CPUPlace()); values_[index].mutable_data<value_type>(platform::CPUPlace());
values_[index].CopyFrom<value_type>(value, platform::CPUPlace(), values_[index].CopyFrom(value, platform::CPUPlace(),
platform::CPUDeviceContext()); platform::CPUDeviceContext());
} }
void TensorArray::WriteShared(size_t index, const LoDTensor& value) { void TensorArray::WriteShared(size_t index, const LoDTensor& value) {
...@@ -116,7 +116,7 @@ void TensorArray::WriteShared(size_t index, const LoDTensor& value) { ...@@ -116,7 +116,7 @@ void TensorArray::WriteShared(size_t index, const LoDTensor& value) {
values_.resize(index + 1); values_.resize(index + 1);
} }
values_[index].ShareDataWith<value_type>(value); values_[index].ShareDataWith(value);
} }
LoDTensor TensorArray::Pack(size_t level, const std::vector<DySeqMeta>& meta, LoDTensor TensorArray::Pack(size_t level, const std::vector<DySeqMeta>& meta,
...@@ -163,9 +163,9 @@ LoDTensor TensorArray::Stack() const { ...@@ -163,9 +163,9 @@ LoDTensor TensorArray::Stack() const {
result.mutable_data<value_type>(platform::CPUPlace()); result.mutable_data<value_type>(platform::CPUPlace());
for (size_t idx = 0; idx < size(); idx++) { for (size_t idx = 0; idx < size(); idx++) {
result.Slice<value_type>(idx, idx + 1) result.Slice(idx, idx + 1)
.CopyFrom<value_type>(Read(idx), platform::CPUPlace(), .CopyFrom(Read(idx), platform::CPUPlace(),
platform::CPUDeviceContext()); platform::CPUDeviceContext());
} }
return result; return result;
} }
...@@ -191,13 +191,12 @@ void TensorArray::Unstack(const LoDTensor& source, bool data_shared) const { ...@@ -191,13 +191,12 @@ void TensorArray::Unstack(const LoDTensor& source, bool data_shared) const {
auto& value = values_[elem]; auto& value = values_[elem];
if (data_shared) { if (data_shared) {
// share memory // share memory
value.ShareDataWith<value_type>(source.Slice<value_type>(elem, elem + 1)); value.ShareDataWith(source.Slice(elem, elem + 1));
} else { } else {
// copy // copy
value.Resize(value_dims); value.Resize(value_dims);
value.CopyFrom<value_type>(source.Slice<value_type>(elem, elem + 1), value.CopyFrom(source.Slice(elem, elem + 1), platform::CPUPlace(),
platform::CPUPlace(), platform::CPUDeviceContext());
platform::CPUDeviceContext());
} }
} }
} }
...@@ -242,11 +241,10 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) { ...@@ -242,11 +241,10 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) {
for (size_t i = 0; i < indice.size(); i++) { for (size_t i = 0; i < indice.size(); i++) {
auto index = indice[i]; auto index = indice[i];
auto target = result.Slice<value_type>(i, i + 1); auto target = result.Slice(i, i + 1);
auto slice = source->Slice<value_type>(index, index + 1); auto slice = source->Slice(index, index + 1);
target.CopyFrom<value_type>(slice, platform::CPUPlace(), target.CopyFrom(slice, platform::CPUPlace(), platform::CPUDeviceContext());
platform::CPUDeviceContext());
} }
return result; return result;
...@@ -277,10 +275,10 @@ LoDTensor PackDynamicBatch(const std::vector<LoDTensor>& source, ...@@ -277,10 +275,10 @@ LoDTensor PackDynamicBatch(const std::vector<LoDTensor>& source,
// target is result[index] // target is result[index]
auto index = seq_meta.begin + batch_id; auto index = seq_meta.begin + batch_id;
if (index >= seq_meta.end) break; if (index >= seq_meta.end) break;
auto source_ = source[batch_id].Slice<float>(seq_id, seq_id + 1); auto source_ = source[batch_id].Slice(seq_id, seq_id + 1);
auto target = result.Slice<float>(index, index + 1); auto target = result.Slice(index, index + 1);
target.CopyFrom<float>(source_, platform::CPUPlace(), target.CopyFrom(source_, platform::CPUPlace(),
platform::CPUDeviceContext()); platform::CPUDeviceContext());
} }
} }
......
@@ -91,7 +91,7 @@ class TensorArrayPackTester : public ::testing::Test {
      size_t begin = level[i];
      size_t end = level[i + 1];
      for (size_t j = begin; j < end; j++) {
-        auto record = source.Slice<int>(j, j + 1);
+        auto record = source.Slice(j, j + 1);
        for (int dim = 0; dim < 128; dim++) {
          record.mutable_data<int>(platform::CPUPlace())[dim] = j - begin;
        }
...
@@ -19,12 +19,50 @@ limitations under the License. */
namespace paddle {
namespace framework {
template <typename... T>
struct SizeOfTypeFunctor;
template <typename T> template <typename T>
struct SizeOfTypeFunctor<T> {
size_t operator()(std::type_index type) const {
if (typeid(T).hash_code() == type.hash_code()) {
return sizeof(T);
} else {
return 0UL;
}
}
};
template <>
struct SizeOfTypeFunctor<> {
size_t operator()(std::type_index type) const { return 0UL; }
};
template <typename HEAD, typename... TAIL>
struct SizeOfTypeFunctor<HEAD, TAIL...> {
size_t operator()(std::type_index type) const {
SizeOfTypeFunctor<HEAD> head;
size_t head_size = head(type);
if (head_size != 0) {
return head_size;
}
SizeOfTypeFunctor<TAIL...> tail;
return tail(type);
}
};
static inline size_t SizeOfType(std::type_index type) {
SizeOfTypeFunctor<int, float, double, int16_t, int64_t> functor;
size_t size = functor(type);
PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
return size;
}
inline void Tensor::check_memory_size() const {
  PADDLE_ENFORCE_NOT_NULL(
      holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
  PADDLE_ENFORCE_GE(
-      holder_->size(), numel() * sizeof(T) + offset_,
+      holder_->size(), numel() * SizeOfType(type()) + offset_,
      "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
      "first to re-allocate memory.\n"
      "or maybe the required data-type mismatches the data already stored.");
@@ -32,14 +70,23 @@ inline void Tensor::check_memory_size() const {
template <typename T>
inline const T* Tensor::data() const {
-  check_memory_size<T>();
+  check_memory_size();
PADDLE_ENFORCE(std::is_same<T, void>::value ||
holder_->type().hash_code() == typeid(T).hash_code(),
"Tensor holds the wrong type, it holds %s",
this->holder_->type().name());
  return reinterpret_cast<const T*>(
      reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
}

template <typename T>
inline T* Tensor::data() {
-  check_memory_size<T>();
+  check_memory_size();
PADDLE_ENFORCE(std::is_same<T, void>::value ||
holder_->type().hash_code() == typeid(T).hash_code(),
"Tensor holds the wrong type, it holds %s",
this->holder_->type().name());
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                              offset_);
}
@@ -54,51 +101,62 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
template <typename T>
inline T* Tensor::mutable_data(platform::Place place) {
  static_assert(std::is_pod<T>::value, "T must be POD");
return reinterpret_cast<T*>(mutable_data(place, typeid(T)));
}
inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
if (holder_ != nullptr) {
holder_->set_type(type);
}
  PADDLE_ENFORCE_GT(numel(), 0,
                    "Tensor's numel must be larger than zero to call "
                    "Tensor::mutable_data. Call Tensor::set_dim first.");
+  int64_t size = numel() * SizeOfType(type);
  /* some versions of boost::variant don't have operator!= */
-  int64_t size = numel() * sizeof(T);
  if (holder_ == nullptr || !(holder_->place() == place) ||
      holder_->size() < size + offset_) {
    if (platform::is_cpu_place(place)) {
-      holder_.reset(new PlaceholderImpl<T, platform::CPUPlace>(
-          boost::get<platform::CPUPlace>(place), size));
+      holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
+          boost::get<platform::CPUPlace>(place), size, type));
    } else if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
    }
#else
-      holder_.reset(new PlaceholderImpl<T, platform::GPUPlace>(
-          boost::get<platform::GPUPlace>(place), size));
+      holder_.reset(new PlaceholderImpl<platform::GPUPlace>(
+          boost::get<platform::GPUPlace>(place), size, type));
    }
#endif
    offset_ = 0;
  }
-  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
-                              offset_);
+  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
}
inline void* Tensor::mutable_data(platform::Place place) {
PADDLE_ENFORCE(this->holder_ != nullptr,
"Cannot invoke mutable data if current hold nothing");
return mutable_data(place, holder_->type());
}

-template <typename T>
inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
-  src.check_memory_size<T>();
+  src.check_memory_size();
  *this = src;
  return *this;
}

-template <typename T>
inline void Tensor::CopyFrom(const Tensor& src,
                             const platform::Place& dst_place,
                             const platform::DeviceContext& ctx) {
-  src.check_memory_size<T>();
+  src.check_memory_size();
  Resize(src.dims());

  auto src_place = src.holder_->place();
-  auto src_ptr = static_cast<const void*>(src.data<T>());
-  auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
-  auto size = src.numel() * sizeof(T);
+  auto src_ptr = src.data<void>();
+  auto dst_ptr = mutable_data(dst_place, src.type());
+  auto size = src.numel() * SizeOfType(src.type());

  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
@@ -165,9 +223,8 @@ inline void Tensor::CopyFromVector(const std::vector<T>& src,
#endif
}

-template <typename T>
inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
-  check_memory_size<T>();
+  check_memory_size();
  PADDLE_ENFORCE_GE(begin_idx, 0, "Slice begin index is less than zero.");
  PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound.");
  PADDLE_ENFORCE_LT(begin_idx, end_idx,
@@ -182,7 +239,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.Resize(dst_dims);
-    dst.offset_ = offset_ + begin_idx * base * sizeof(T);
+    dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
    return dst;
  }
}
@@ -196,10 +253,9 @@ inline const DDim& Tensor::dims() const { return dims_; }
inline int64_t Tensor::numel() const { return product(dims_); }

-template <typename T>
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
  Tensor res;
-  res.ShareDataWith<T>(src);
+  res.ShareDataWith(src);
  res.Resize(flatten_to_2d(src.dims(), num_col_dims));
  return res;
}
...
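A standalone sketch (not Paddle code) of the variadic SizeOfTypeFunctor idea introduced in this file: walk a compile-time list of candidate types and return sizeof() for the one whose std::type_index matches the runtime type. Here an unsupported type yields 0 rather than PADDLE_THROW, and the names SizeOf/SizeOfType are illustrative.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <typeindex>
#include <typeinfo>

template <typename... T>
struct SizeOf;

// Base case: the candidate list is exhausted, no type matched.
template <>
struct SizeOf<> {
  std::size_t operator()(std::type_index) const { return 0; }
};

// Recursive case: test the head of the list, then fall through to the tail.
template <typename Head, typename... Tail>
struct SizeOf<Head, Tail...> {
  std::size_t operator()(std::type_index type) const {
    if (type == std::type_index(typeid(Head))) return sizeof(Head);
    return SizeOf<Tail...>{}(type);
  }
};

inline std::size_t SizeOfType(std::type_index type) {
  return SizeOf<int, float, double, int16_t, int64_t>{}(type);
}

int main() {
  assert(SizeOfType(std::type_index(typeid(double))) == sizeof(double));
  assert(SizeOfType(std::type_index(typeid(int64_t))) == sizeof(int64_t));
  assert(SizeOfType(std::type_index(typeid(bool))) == 0);  // not in the candidate list
  return 0;
}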
...@@ -108,7 +108,7 @@ TEST(Tensor, ShareDataWith) { ...@@ -108,7 +108,7 @@ TEST(Tensor, ShareDataWith) {
// Try to share data form uninitialized tensor // Try to share data form uninitialized tensor
bool caught = false; bool caught = false;
try { try {
dst_tensor.ShareDataWith<float>(src_tensor); dst_tensor.ShareDataWith(src_tensor);
} catch (paddle::platform::EnforceNotMet err) { } catch (paddle::platform::EnforceNotMet err) {
caught = true; caught = true;
std::string msg = std::string msg =
...@@ -122,7 +122,7 @@ TEST(Tensor, ShareDataWith) { ...@@ -122,7 +122,7 @@ TEST(Tensor, ShareDataWith) {
ASSERT_TRUE(caught); ASSERT_TRUE(caught);
src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), CPUPlace()); src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), CPUPlace());
dst_tensor.ShareDataWith<int>(src_tensor); dst_tensor.ShareDataWith(src_tensor);
ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>()); ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
} }
...@@ -131,7 +131,7 @@ TEST(Tensor, ShareDataWith) { ...@@ -131,7 +131,7 @@ TEST(Tensor, ShareDataWith) {
Tensor src_tensor; Tensor src_tensor;
Tensor dst_tensor; Tensor dst_tensor;
src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace()); src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
dst_tensor.ShareDataWith<int>(src_tensor); dst_tensor.ShareDataWith(src_tensor);
ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>()); ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
} }
#endif #endif
...@@ -143,7 +143,7 @@ TEST(Tensor, Slice) { ...@@ -143,7 +143,7 @@ TEST(Tensor, Slice) {
{ {
Tensor src_tensor; Tensor src_tensor;
src_tensor.mutable_data<int>(make_ddim({5, 3, 4}), CPUPlace()); src_tensor.mutable_data<int>(make_ddim({5, 3, 4}), CPUPlace());
Tensor slice_tensor = src_tensor.Slice<int>(1, 3); Tensor slice_tensor = src_tensor.Slice(1, 3);
DDim slice_dims = slice_tensor.dims(); DDim slice_dims = slice_tensor.dims();
ASSERT_EQ(arity(slice_dims), 3); ASSERT_EQ(arity(slice_dims), 3);
EXPECT_EQ(slice_dims[0], 2); EXPECT_EQ(slice_dims[0], 2);
...@@ -167,7 +167,7 @@ TEST(Tensor, Slice) { ...@@ -167,7 +167,7 @@ TEST(Tensor, Slice) {
{ {
Tensor src_tensor; Tensor src_tensor;
src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace()); src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
Tensor slice_tensor = src_tensor.Slice<double>(2, 6); Tensor slice_tensor = src_tensor.Slice(2, 6);
DDim slice_dims = slice_tensor.dims(); DDim slice_dims = slice_tensor.dims();
ASSERT_EQ(arity(slice_dims), 2); ASSERT_EQ(arity(slice_dims), 2);
EXPECT_EQ(slice_dims[0], 4); EXPECT_EQ(slice_dims[0], 4);
...@@ -202,7 +202,7 @@ TEST(Tensor, CopyFrom) { ...@@ -202,7 +202,7 @@ TEST(Tensor, CopyFrom) {
memcpy(src_ptr, arr, 9 * sizeof(int)); memcpy(src_ptr, arr, 9 * sizeof(int));
auto cpu_place = new paddle::platform::CPUPlace(); auto cpu_place = new paddle::platform::CPUPlace();
dst_tensor.CopyFrom<int>(src_tensor, *cpu_place, cpu_ctx); dst_tensor.CopyFrom(src_tensor, *cpu_place, cpu_ctx);
const int* dst_ptr = dst_tensor.data<int>(); const int* dst_ptr = dst_tensor.data<int>();
ASSERT_NE(src_ptr, dst_ptr); ASSERT_NE(src_ptr, dst_ptr);
...@@ -210,8 +210,8 @@ TEST(Tensor, CopyFrom) { ...@@ -210,8 +210,8 @@ TEST(Tensor, CopyFrom) {
EXPECT_EQ(src_ptr[i], dst_ptr[i]); EXPECT_EQ(src_ptr[i], dst_ptr[i]);
} }
Tensor slice_tensor = src_tensor.Slice<int>(1, 2); Tensor slice_tensor = src_tensor.Slice(1, 2);
dst_tensor.CopyFrom<int>(slice_tensor, *cpu_place, cpu_ctx); dst_tensor.CopyFrom(slice_tensor, *cpu_place, cpu_ctx);
const int* slice_ptr = slice_tensor.data<int>(); const int* slice_ptr = slice_tensor.data<int>();
dst_ptr = dst_tensor.data<int>(); dst_ptr = dst_tensor.data<int>();
ASSERT_NE(dst_ptr, slice_ptr); ASSERT_NE(dst_ptr, slice_ptr);
...@@ -233,11 +233,11 @@ TEST(Tensor, CopyFrom) { ...@@ -233,11 +233,11 @@ TEST(Tensor, CopyFrom) {
// CPU Tensor to GPU Tensor // CPU Tensor to GPU Tensor
auto gpu_place = new paddle::platform::GPUPlace(0); auto gpu_place = new paddle::platform::GPUPlace(0);
CUDADeviceContext gpu_ctx(*gpu_place); CUDADeviceContext gpu_ctx(*gpu_place);
gpu_tensor.CopyFrom<int>(src_tensor, *gpu_place, gpu_ctx); gpu_tensor.CopyFrom(src_tensor, *gpu_place, gpu_ctx);
// GPU Tensor to CPU Tensor // GPU Tensor to CPU Tensor
auto cpu_place = new paddle::platform::CPUPlace(); auto cpu_place = new paddle::platform::CPUPlace();
dst_tensor.CopyFrom<int>(gpu_tensor, *cpu_place, gpu_ctx); dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx);
// Sync before Compare Tensors // Sync before Compare Tensors
gpu_ctx.Wait(); gpu_ctx.Wait();
...@@ -247,13 +247,13 @@ TEST(Tensor, CopyFrom) { ...@@ -247,13 +247,13 @@ TEST(Tensor, CopyFrom) {
EXPECT_EQ(src_ptr[i], dst_ptr[i]); EXPECT_EQ(src_ptr[i], dst_ptr[i]);
} }
Tensor slice_tensor = src_tensor.Slice<int>(1, 2); Tensor slice_tensor = src_tensor.Slice(1, 2);
// CPU Slice Tensor to GPU Tensor // CPU Slice Tensor to GPU Tensor
gpu_tensor.CopyFrom<int>(slice_tensor, *gpu_place, gpu_ctx); gpu_tensor.CopyFrom(slice_tensor, *gpu_place, gpu_ctx);
// GPU Tensor to CPU Tensor // GPU Tensor to CPU Tensor
dst_tensor.CopyFrom<int>(gpu_tensor, *cpu_place, gpu_ctx); dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx);
// Sync before Compare Slice Tensors // Sync before Compare Slice Tensors
gpu_ctx.Wait(); gpu_ctx.Wait();
...@@ -320,7 +320,7 @@ TEST(Tensor, CopyFromVector) { ...@@ -320,7 +320,7 @@ TEST(Tensor, CopyFromVector) {
CUDADeviceContext gpu_ctx(*gpu_place); CUDADeviceContext gpu_ctx(*gpu_place);
gpu_tensor.CopyFromVector<int>(src_vec, gpu_ctx); gpu_tensor.CopyFromVector<int>(src_vec, gpu_ctx);
// Copy from GPU to CPU tensor for comparison // Copy from GPU to CPU tensor for comparison
dst_tensor.CopyFrom<int>(gpu_tensor, *cpu_place, gpu_ctx); dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx);
// Sync before Compare Tensors // Sync before Compare Tensors
gpu_ctx.Wait(); gpu_ctx.Wait();
...@@ -340,7 +340,7 @@ TEST(Tensor, CopyFromVector) { ...@@ -340,7 +340,7 @@ TEST(Tensor, CopyFromVector) {
cpu_tensor.CopyFromVector<int>(src_vec, cpu_ctx); cpu_tensor.CopyFromVector<int>(src_vec, cpu_ctx);
gpu_tensor.Resize(make_ddim({2, 2})); gpu_tensor.Resize(make_ddim({2, 2}));
gpu_tensor.CopyFromVector<int>(src_vec, gpu_ctx); gpu_tensor.CopyFromVector<int>(src_vec, gpu_ctx);
dst_tensor.CopyFrom<int>(gpu_tensor, *cpu_place, gpu_ctx); dst_tensor.CopyFrom(gpu_tensor, *cpu_place, gpu_ctx);
// Sync before Compare Tensors // Sync before Compare Tensors
gpu_ctx.Wait(); gpu_ctx.Wait();
...@@ -368,7 +368,7 @@ TEST(Tensor, ReshapeToMatrix) { ...@@ -368,7 +368,7 @@ TEST(Tensor, ReshapeToMatrix) {
for (int i = 0; i < 2 * 3 * 4 * 9; ++i) { for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
src_ptr[i] = i; src_ptr[i] = i;
} }
Tensor res = ReshapeToMatrix<int>(src, 2); Tensor res = ReshapeToMatrix(src, 2);
ASSERT_EQ(res.dims()[0], 2 * 3); ASSERT_EQ(res.dims()[0], 2 * 3);
ASSERT_EQ(res.dims()[1], 4 * 9); ASSERT_EQ(res.dims()[1], 4 * 9);
} }
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
settings(batch_size=16)
channels = get_config_arg("channels", int, 2)
def two_fc(input, group_name):
out1 = fc_layer(input=input,
name=group_name+'_fc1',
size=channels,
bias_attr=False,
act=LinearActivation())
out2 = fc_layer(input=input,
name=group_name+'_fc2',
size=channels,
bias_attr=False,
act=LinearActivation())
return out1, out2
data = data_layer(name ="input", size=channels*16*16)
conv = img_conv_layer(input=data,
num_channels=channels,
filter_size=3,
num_filters=channels,
padding=1,
shared_biases=True,
act=LinearActivation())
pool = img_pool_layer(input=conv,
pool_size=3,
stride=2,
padding=1,
pool_type=AvgPooling())
a1, a2 = two_fc(input=pool, group_name='a')
concat = concat_layer(input=[a1, a2])
b1, b2 = two_fc(input=pool, group_name='b')
addto = addto_layer(input=[b1, b2])
outputs([concat, addto])
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
settings(batch_size=16)
channels = get_config_arg("channels", int, 2)
def two_pool(input, group_name):
out1 = img_pool_layer(input=input,
name=group_name+'_pool1',
pool_size=3,
stride=2,
padding=0,
pool_type=MaxPooling())
out2 = img_pool_layer(input=input,
name=group_name+'_pool2',
pool_size=5,
stride=2,
padding=1,
pool_type=MaxPooling())
return out1, out2
data = data_layer(name ="input", size=channels*16*16)
conv = img_conv_layer(input=data,
num_channels=channels,
filter_size=3,
num_filters=channels,
padding=1,
shared_biases=True,
act=LinearActivation())
pool = img_pool_layer(input=conv,
pool_size=3,
stride=1,
padding=1,
pool_type=AvgPooling())
a1, a2 = two_pool(input=pool, group_name='a')
concat = concat_layer(input=[a1, a2])
b1, b2 = two_pool(input=pool, group_name='b')
addto = addto_layer(input=[b1, b2])
outputs([concat, addto])
@@ -250,7 +250,7 @@ TEST(MKLDNNActivation, Activations) {
DECLARE_string(config_args);
TEST(MKLDNNLayer, branches) {
-  std::vector<std::string> cases = {"conv"};
+  std::vector<std::string> cases = {"conv", "pool", "fc"};
  for (auto name : cases) {
    std::string config = "./gserver/tests/mkldnn_branches_" + name + ".conf";
    for (auto channels : {2, 32}) {
...
@@ -69,5 +69,8 @@ information, or not. But the output only shares the LoD with input `Inference`.
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(accuracy, ops::AccuracyOp, ops::AccuracyOpMaker);
-REGISTER_OP_CPU_KERNEL(accuracy,
-                       ops::AccuracyKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    accuracy, ops::AccuracyKernel<paddle::platform::CPUPlace, float>,
+    ops::AccuracyKernel<paddle::platform::CPUPlace, int>,
+    ops::AccuracyKernel<paddle::platform::CPUPlace, double>,
+    ops::AccuracyKernel<paddle::platform::CPUPlace, int64_t>);
@@ -21,9 +21,9 @@ namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;

-template <int BlockSize>
-__global__ void AccuracyCudaKernel(const int N, const int D, const int* Xdata,
-                                   const int* labeldata, float* accuracy) {
+template <typename T, int BlockSize>
+__global__ void AccuracyCudaKernel(const int N, const int D, const T* Xdata,
+                                   const T* labeldata, float* accuracy) {
  int count = 0;
  __shared__ int total[BlockSize];
@@ -57,8 +57,8 @@ class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
    auto* accuracy = ctx.Output<Tensor>("Accuracy");
    // FIXME(typhoonzero): only support indices currently
    // if add support for output values, how to detect the data type?
-    const int* inference_data = inference->data<int>();
-    const int* label_data = label->data<int>();
+    const T* inference_data = inference->data<T>();
+    const T* label_data = label->data<T>();
    float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());

    size_t num_samples = inference->dims()[0];
@@ -69,7 +69,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
      return;
    }

-    AccuracyCudaKernel<PADDLE_CUDA_NUM_THREADS><<<
+    AccuracyCudaKernel<T, PADDLE_CUDA_NUM_THREADS><<<
        1, PADDLE_CUDA_NUM_THREADS, 0,
        reinterpret_cast<const platform::CUDADeviceContext&>(
            ctx.device_context())
@@ -81,5 +81,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
}  // namespace operators
}  // namespace paddle

-REGISTER_OP_GPU_KERNEL(accuracy,
-                       paddle::operators::AccuracyOpCUDAKernel<float>);
+REGISTER_OP_GPU_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel<float>,
+                       paddle::operators::AccuracyOpCUDAKernel<double>,
+                       paddle::operators::AccuracyOpCUDAKernel<int>,
+                       paddle::operators::AccuracyOpCUDAKernel<int64_t>);
...@@ -108,17 +108,17 @@ class GemmConv2DKernel : public framework::OpKernel<T> { ...@@ -108,17 +108,17 @@ class GemmConv2DKernel : public framework::OpKernel<T> {
int in_step = input_channels / groups; int in_step = input_channels / groups;
int out_step = output_channels / groups; int out_step = output_channels / groups;
for (int i = 0; i < batch_size; i++) { for (int i = 0; i < batch_size; i++) {
Tensor in_batch = input->Slice<T>(i, i + 1).Resize(input_shape); Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
Tensor out_batch = output->Slice<T>(i, i + 1).Resize(output_matrix_shape); Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);
for (int g = 0; g < groups; g++) { for (int g = 0; g < groups; g++) {
// im2col // im2col
Tensor in_slice = in_batch.Slice<T>(g * in_step, (g + 1) * in_step); Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);
im2col(context.device_context(), in_slice, col, strides[0], strides[1], im2col(context.device_context(), in_slice, col, strides[0], strides[1],
paddings[0], paddings[1]); paddings[0], paddings[1]);
// gemm // gemm
Tensor out_slice = out_batch.Slice<T>(g * out_step, (g + 1) * out_step); Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice<T>(g * out_step, (g + 1) * out_step); Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul<Place, T>(context.device_context(), filter_slice, false, math::matmul<Place, T>(context.device_context(), filter_slice, false,
col_matrix, false, T(1.0), &out_slice, T(0.0)); col_matrix, false, T(1.0), &out_slice, T(0.0));
} }
...@@ -198,22 +198,20 @@ class GemmConvGrad2DKernel : public framework::OpKernel<T> { ...@@ -198,22 +198,20 @@ class GemmConvGrad2DKernel : public framework::OpKernel<T> {
for (int i = 0; i < batch_size; i++) { for (int i = 0; i < batch_size; i++) {
Tensor out_grad_batch = Tensor out_grad_batch =
output_grad->Slice<T>(i, i + 1).Resize(output_matrix_shape); output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
Tensor in_grad_batch = Tensor in_grad_batch = input_grad->Slice(i, i + 1).Resize(input_shape);
input_grad->Slice<T>(i, i + 1).Resize(input_shape);
for (int g = 0; g < groups; g++) { for (int g = 0; g < groups; g++) {
// gemm // gemm
Tensor out_grad_slice = Tensor out_grad_slice =
out_grad_batch.Slice<T>(g * out_step, (g + 1) * out_step); out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
filter.Slice<T>(g * out_step, (g + 1) * out_step);
math::matmul<Place, T>(context.device_context(), filter_slice, true, math::matmul<Place, T>(context.device_context(), filter_slice, true,
out_grad_slice, false, T(1.0), &col_matrix, out_grad_slice, false, T(1.0), &col_matrix,
T(0.0)); T(0.0));
// col2im // col2im
Tensor in_grad_slice = Tensor in_grad_slice =
in_grad_batch.Slice<T>(g * in_step, (g + 1) * in_step); in_grad_batch.Slice(g * in_step, (g + 1) * in_step);
col2im(context.device_context(), in_grad_slice, col, strides[0], col2im(context.device_context(), in_grad_slice, col, strides[0],
strides[1], paddings[0], paddings[1]); strides[1], paddings[0], paddings[1]);
} }
...@@ -229,19 +227,19 @@ class GemmConvGrad2DKernel : public framework::OpKernel<T> { ...@@ -229,19 +227,19 @@ class GemmConvGrad2DKernel : public framework::OpKernel<T> {
for (int i = 0; i < batch_size; i++) { for (int i = 0; i < batch_size; i++) {
Tensor out_grad_batch = Tensor out_grad_batch =
output_grad->Slice<T>(i, i + 1).Resize(output_matrix_shape); output_grad->Slice(i, i + 1).Resize(output_matrix_shape);
Tensor in_batch = input->Slice<T>(i, i + 1).Resize(input_shape); Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
for (int g = 0; g < groups; g++) { for (int g = 0; g < groups; g++) {
// im2col // im2col
Tensor out_grad_slice = Tensor out_grad_slice =
out_grad_batch.Slice<T>(g * out_step, (g + 1) * out_step); out_grad_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor in_slice = in_batch.Slice<T>(g * in_step, (g + 1) * in_step); Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);
im2col(context.device_context(), in_slice, col, strides[0], im2col(context.device_context(), in_slice, col, strides[0],
strides[1], paddings[0], paddings[1]); strides[1], paddings[0], paddings[1]);
// gemm // gemm
Tensor filter_grad_slice = Tensor filter_grad_slice =
filter_grad_.Slice<T>(g * out_step, (g + 1) * out_step); filter_grad_.Slice(g * out_step, (g + 1) * out_step);
math::matmul<Place, T>(context.device_context(), out_grad_slice, math::matmul<Place, T>(context.device_context(), out_grad_slice,
false, col_matrix, true, T(1.0), false, col_matrix, true, T(1.0),
&filter_grad_slice, T(1.0)); &filter_grad_slice, T(1.0));
......
...@@ -23,6 +23,7 @@ using framework::Scope; ...@@ -23,6 +23,7 @@ using framework::Scope;
using framework::TensorArray; using framework::TensorArray;
using framework::LoDTensor; using framework::LoDTensor;
using framework::Variable; using framework::Variable;
using framework::OperatorBase;
using framework::DySeqMetaBatch; using framework::DySeqMetaBatch;
namespace detail { namespace detail {
...@@ -43,72 +44,72 @@ inline void CreateVariables(Scope& scope, ...@@ -43,72 +44,72 @@ inline void CreateVariables(Scope& scope,
 * be reordered, but the RNN op should not change the content of the * be reordered, but the RNN op should not change the content of the
 * `boot_state` input variable. * `boot_state` input variable.
*/ */
template <typename T> inline void ReorderInitialState(const DySeqMetaBatch& metas,
inline void ReorderBootState(const DySeqMetaBatch& metas, const LoDTensor& boot_state, LoDTensor* tensor,
const LoDTensor& boot_state, LoDTensor* tensor, const platform::Place& dst_place) {
const platform::Place& dst_place) {
for (size_t seq_id = 0; seq_id < metas.size(); seq_id++) { for (size_t seq_id = 0; seq_id < metas.size(); seq_id++) {
auto slice = tensor->Slice<T>(seq_id, seq_id + 1); auto slice = tensor->Slice(seq_id, seq_id + 1);
auto boot_slice = auto boot_slice =
boot_state.Slice<T>(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1); boot_state.Slice(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1);
// TODO(superjom) pass in device context as an argument // TODO(superjom) pass in device context as an argument
slice.template CopyFrom<T>(boot_slice, dst_place, slice.CopyFrom(boot_slice, dst_place, platform::CPUDeviceContext());
platform::CPUDeviceContext());
} }
} }
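ReorderInitialState exists because the dynamic RNN sorts sequences by length before splitting them into step batches, so row seq_id of the per-step state has to be gathered from row metas[seq_id].ori_idx of the boot state; RestoreInitialState, added just below, scatters the rows back the same way for the gradient. A minimal sketch of the gather over plain row-major buffers (names and layout are assumptions, not the Paddle API):

#include <cstring>
#include <vector>

// Gather rows so that out[seq_id] == boot[ori_idx[seq_id]], mirroring how the
// initial state must follow the length-sorted sequence order.
void reorder_rows(const std::vector<float>& boot,
                  const std::vector<int>& ori_idx, int width,
                  std::vector<float>* out) {
  out->resize(boot.size());
  for (size_t seq_id = 0; seq_id < ori_idx.size(); ++seq_id) {
    std::memcpy(out->data() + seq_id * width,
                boot.data() + ori_idx[seq_id] * width, sizeof(float) * width);
  }
}

int main() {
  std::vector<float> boot = {10, 10, 20, 20, 30, 30};  // 3 sequences, width 2
  std::vector<int> ori_idx = {2, 0, 1};  // length-sorted order -> original ids
  std::vector<float> reordered;
  reorder_rows(boot, ori_idx, 2, &reordered);  // rows: 30,30  10,10  20,20
  return 0;
}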
} // namespace detail inline void RestoreInitialState(const DySeqMetaBatch& metas,
const LoDTensor& tensor, LoDTensor* boot_state,
class DynamicRecurrentOpProtoAndCheckerMaker const platform::Place& dst_place) {
: public framework::OpProtoAndCheckerMaker { for (size_t seq_id = 0; seq_id < metas.size(); seq_id++) {
public: auto slice = tensor.Slice(seq_id, seq_id + 1);
DynamicRecurrentOpProtoAndCheckerMaker(framework::OpProto* proto, auto boot_slice =
framework::OpAttrChecker* op_checker) boot_state->Slice(metas[seq_id].ori_idx, metas[seq_id].ori_idx + 1);
: OpProtoAndCheckerMaker(proto, op_checker) { boot_slice.CopyFrom(slice, dst_place, platform::CPUDeviceContext());
const auto& name = DynamicRecurrentOp::kArgName;
// inputs and outputs stored in proto
AddInput(name.inlinks,
"the inputs that need to be segmented for each step.")
.AsDuplicable();
AddInput(name.boot_memories, "variables to initialize memories.")
.AsDuplicable();
AddOutput(name.outlinks, "the outputs that need to be concatenated for all steps.")
.AsDuplicable();
AddOutput(name.step_scopes, "step scopes");
// Attributes stored in AttributeMap
AddAttr<std::vector<std::string>>(name.pre_memories,
"names of pre-memories");
AddAttr<std::vector<std::string>>(name.memories, "names of memories");
AddComment("This is a RNN operator for varience-length sequences.");
} }
}; }
void DynamicRecurrentOp::Run(const Scope& scope, } // namespace detail
const platform::DeviceContext& dev_ctx) const {
cache_.Init(kArgName, *this, scope, &arg_); // Implementation for forward propagation.
template <>
void RNNAlgorithm::Run<RNNAlgorithm::ComputeMode::kForward>(
const framework::Scope& scope, const framework::OperatorBase& op,
const platform::DeviceContext& dev_ctx) {
SetComputeMode(ComputeMode::kForward);
cache_.Init(kArgNames[mode_], op, scope, &dev_ctx, &arg_);
SplitInputs(); SplitInputs();
CreateScopes(); CreateScopes();
WriteStepInputs(); WriteStepInputs();
InitStates(); InitStates();
WriteStepOutputs(); WriteStepOutputs();
RunSteps();
ConcatOutputs();
}
// call stepnet in all the time steps // Implementation for backward propagation.
for (size_t step = 0; step < cache_.num_steps; step++) { template <>
auto& step_scope = cache_.GetScope(step); void RNNAlgorithm::Run<RNNAlgorithm::ComputeMode::kBackward>(
stepnet_->Run(step_scope, dev_ctx); const framework::Scope& scope, const framework::OperatorBase& op,
const platform::DeviceContext& dev_ctx) {
SetComputeMode(ComputeMode::kBackward);
cache_.Init(kArgNames[mode_], op, scope, &dev_ctx, &arg_);
SplitInputs();
WriteStepInputs();
InitStates();
WriteStepOutputs();
RunSteps();
// copy boot-states' gradients back.
for (const auto& state : arg_.states) {
ExportInitialStateGradient(state);
} }
ConcatOutputs(); ConcatOutputs();
} }
void DynamicRecurrentOp::SplitInputs() const { void RNNAlgorithm::SplitInputs() {
// TODO(superjom) make level a config // TODO(superjom) make level a config
// TODO(superjom) check that all the inputs have the same LoD // TODO(superjom) check that all the inputs have the same LoD
int level = 0; int level = 0;
for (const auto& item : cache_.inlinks) { for (const auto& item : cache_.inputs) {
const auto& var = item.second; const auto& var = item.second;
const auto& tensor = var->Get<LoDTensor>(); const auto& tensor = var->Get<LoDTensor>();
TensorArray& ta = step_inputs_[item.first]; TensorArray& ta = step_inputs_[item.first];
...@@ -125,8 +126,8 @@ void DynamicRecurrentOp::SplitInputs() const { ...@@ -125,8 +126,8 @@ void DynamicRecurrentOp::SplitInputs() const {
} }
} }
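SplitInputs packs the t-th element of every still-active sequence into the step-t batch; once the sequences are sorted by length, each step batch is a prefix of the previous one, which is what makes the shrink-and-share state linking later possible. A small self-contained sketch of that splitting rule over plain std::vector sequences (not LoDTensor/TensorArray):

#include <algorithm>
#include <cstdio>
#include <vector>

// Split variable-length sequences into per-step batches: step t holds the t-th
// element of every sequence longer than t, with sequences sorted longest-first.
std::vector<std::vector<int>> split_to_steps(std::vector<std::vector<int>> seqs) {
  std::sort(seqs.begin(), seqs.end(),
            [](const std::vector<int>& a, const std::vector<int>& b) {
              return a.size() > b.size();
            });
  size_t max_len = seqs.empty() ? 0 : seqs.front().size();
  std::vector<std::vector<int>> steps(max_len);
  for (size_t t = 0; t < max_len; ++t)
    for (const auto& s : seqs)
      if (t < s.size()) steps[t].push_back(s[t]);
  return steps;
}

int main() {
  // three sequences of lengths 3, 1, 2 -> step batches of sizes 3, 2, 1
  auto steps = split_to_steps({{1, 2, 3}, {4}, {5, 6}});
  std::printf("step 0 holds %zu elements\n", steps[0].size());
  return 0;
}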
void DynamicRecurrentOp::WriteStepInputs() const { void RNNAlgorithm::WriteStepInputs() {
for (const auto& item : cache_.inlinks) { for (const auto& item : cache_.inputs) {
auto ta_it = step_inputs_.find(item.first); auto ta_it = step_inputs_.find(item.first);
PADDLE_ENFORCE(ta_it != step_inputs_.end(), PADDLE_ENFORCE(ta_it != step_inputs_.end(),
"step_inputs_ not compatible with memory set"); "step_inputs_ not compatible with memory set");
...@@ -138,20 +139,20 @@ void DynamicRecurrentOp::WriteStepInputs() const { ...@@ -138,20 +139,20 @@ void DynamicRecurrentOp::WriteStepInputs() const {
if (var == nullptr) { if (var == nullptr) {
var = step_scope.Var(item.first); var = step_scope.Var(item.first);
} }
var->GetMutable<LoDTensor>()->ShareDataWith<value_type>(tensor); var->GetMutable<LoDTensor>()->ShareDataWith(tensor);
} }
} }
} }
void DynamicRecurrentOp::WriteStepOutputs() const { void RNNAlgorithm::WriteStepOutputs() {
// initialize step outputs // initialize step outputs
for (const auto& item : cache_.outlinks) { for (const auto& item : cache_.outputs) {
step_outputs_.emplace(item.first, TensorArray()); step_outputs_.emplace(item.first, TensorArray());
} }
PADDLE_ENFORCE_GT(step_outputs_.size(), 0UL); PADDLE_ENFORCE_GT(step_outputs_.size(), 0UL);
} }
void DynamicRecurrentOp::CreateScopes() const { void RNNAlgorithm::CreateScopes() {
PADDLE_ENFORCE_GT(cache_.num_steps, 0); PADDLE_ENFORCE_GT(cache_.num_steps, 0);
// resize scopes // resize scopes
size_t num_scopes_need_create = cache_.num_steps - cache_.scopes->size(); size_t num_scopes_need_create = cache_.num_steps - cache_.scopes->size();
...@@ -160,19 +161,19 @@ void DynamicRecurrentOp::CreateScopes() const { ...@@ -160,19 +161,19 @@ void DynamicRecurrentOp::CreateScopes() const {
} }
// init temporary inputs // init temporary inputs
PADDLE_ENFORCE_NOT_NULL(stepnet_, "stepnet should be set first"); PADDLE_ENFORCE_NOT_NULL(step_unit_, "stepnet should be set first");
std::vector<std::string> memories; std::vector<std::string> states;
std::vector<std::string> pre_memories; std::vector<std::string> ex_states;
std::vector<std::string> stepnet_outputs; std::vector<std::string> step_unit_outputs;
std::transform(arg_.memories.begin(), arg_.memories.end(), std::transform(arg_.states.begin(), arg_.states.end(),
std::back_inserter(memories), std::back_inserter(states),
[](const rnn::MemoryAttr& m) { return m.var; }); [](const rnn::StateAttr& m) { return m.var; });
std::transform(arg_.memories.begin(), arg_.memories.end(), std::transform(arg_.states.begin(), arg_.states.end(),
std::back_inserter(pre_memories), std::back_inserter(ex_states),
[](const rnn::MemoryAttr& m) { return m.pre_var; }); [](const rnn::StateAttr& m) { return m.pre_var; });
for (const auto& item : stepnet_->Outputs()) { for (const auto& item : step_unit_->Outputs()) {
for (const auto& var : item.second) { for (const auto& var : item.second) {
stepnet_outputs.push_back(var); step_unit_outputs.push_back(var);
} }
} }
...@@ -180,13 +181,13 @@ void DynamicRecurrentOp::CreateScopes() const { ...@@ -180,13 +181,13 @@ void DynamicRecurrentOp::CreateScopes() const {
auto& scope = cache_.GetScope(step); auto& scope = cache_.GetScope(step);
detail::CreateVariables(scope, arg_.inlinks); detail::CreateVariables(scope, arg_.inlinks);
detail::CreateVariables(scope, arg_.outlinks); detail::CreateVariables(scope, arg_.outlinks);
detail::CreateVariables(scope, memories); detail::CreateVariables(scope, states);
detail::CreateVariables(scope, pre_memories); detail::CreateVariables(scope, ex_states);
detail::CreateVariables(scope, stepnet_outputs); detail::CreateVariables(scope, step_unit_outputs);
} }
} }
void DynamicRecurrentOp::ConcatOutputs() const { void RNNAlgorithm::ConcatOutputs() {
// TODO(superjom) transform this to a config // TODO(superjom) transform this to a config
int level = 0; int level = 0;
for (size_t step = 0; step < cache_.num_steps; step++) { for (size_t step = 0; step < cache_.num_steps; step++) {
...@@ -199,31 +200,45 @@ void DynamicRecurrentOp::ConcatOutputs() const { ...@@ -199,31 +200,45 @@ void DynamicRecurrentOp::ConcatOutputs() const {
item.second.WriteShared(step, *tensor); item.second.WriteShared(step, *tensor);
} }
} }
// the inlinks' lods should be the same, so randomly get one lod. // the inputs' lods should be the same, so randomly get one lod.
const auto& some_lod = const auto& some_lod =
cache_.scope->FindVar(arg_.inlinks.front())->Get<LoDTensor>().lod(); cache_.scope->FindVar(arg_.inlinks.front())->Get<LoDTensor>().lod();
const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()]; const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()];
for (auto& item : step_outputs_) { for (auto& item : step_outputs_) {
auto tensor = item.second.Pack(level, some_meta, some_lod); auto tensor = item.second.Pack(level, some_meta, some_lod);
auto* output = cache_.outlinks[item.first]->GetMutable<LoDTensor>(); auto* output = cache_.outputs[item.first]->GetMutable<LoDTensor>();
const_cast<LoDTensor*>(output)->ShareDataWith<value_type>(tensor); const_cast<LoDTensor*>(output)->ShareDataWith(tensor);
}
}
void RNNAlgorithm::RunSteps() {
if (IsBackward()) {
// call the step unit for all the time steps, in reverse order
for (int step = cache_.num_steps - 1; step >= 0; step--) {
auto& step_scope = cache_.GetScope(step);
step_unit_->Run(step_scope, *cache_.dev_ctx);
}
} else {
for (size_t step = 0; step < cache_.num_steps; step++) {
auto& step_scope = cache_.GetScope(step);
step_unit_->Run(step_scope, *cache_.dev_ctx);
}
} }
} }
void DynamicRecurrentOp::InitStates() const { void RNNAlgorithm::InitStates() {
for (size_t step = 0; step < cache_.num_steps; step++) { for (size_t step = 0; step < cache_.num_steps; step++) {
for (const auto& memory : arg_.memories) { for (const auto& state : arg_.states) {
CreateState(memory, step); CreateState(state, step);
LinkState(memory, step); LinkState(state, step);
} }
} }
} }
void DynamicRecurrentOp::CreateState(const rnn::MemoryAttr& memory, void RNNAlgorithm::CreateState(const rnn::StateAttr& state_attr, size_t step) {
size_t step) const {
auto& scope = cache_.GetScope(step); auto& scope = cache_.GetScope(step);
auto& state = *cache_.GetTensor(scope, memory.var); auto& state = *cache_.GetTensor(scope, state_attr.var);
auto& boot_state = *cache_.GetTensor(*cache_.scope, memory.boot_var); auto& boot_state = *cache_.GetTensor(*cache_.scope, state_attr.boot_var);
size_t num_instances = size_t num_instances =
step_inputs_[arg_.inlinks.front()].Read(step).dims()[0]; step_inputs_[arg_.inlinks.front()].Read(step).dims()[0];
...@@ -232,56 +247,79 @@ void DynamicRecurrentOp::CreateState(const rnn::MemoryAttr& memory, ...@@ -232,56 +247,79 @@ void DynamicRecurrentOp::CreateState(const rnn::MemoryAttr& memory,
state.Resize(dims); state.Resize(dims);
state.mutable_data<value_type>(platform::CPUPlace()); state.mutable_data<value_type>(platform::CPUPlace());
states_[memory.var].WriteShared(step, state); states_[state_attr.var].WriteShared(step, state);
} }
void DynamicRecurrentOp::LinkState(const rnn::MemoryAttr& memory, void RNNAlgorithm::LinkState(const rnn::StateAttr& state, size_t step) {
size_t step) const {
auto& scope = cache_.GetScope(step); auto& scope = cache_.GetScope(step);
auto& state_pre = *cache_.GetTensor(scope, memory.pre_var); auto& state_pre = *cache_.GetTensor(scope, state.pre_var);
// Process the first state's boot-state (the 0-th step in forward mode or the
// last step in backward mode).
// Only forward mode needs to link the boot-state to the `pre-state` of the
// first time step. In backward mode, the gradient of the `pre-state` in the
// first time step has to be copied to the gradient of the `boot-state`.
if (step == 0 && IsForward()) {
LinkInitialState(state);
} else {
size_t num_instances =
step_inputs_[arg_.inlinks.front()].Read(step).dims()[0];
auto* pre_state = cache_.GetTensor(cache_.GetScope(step - 1), state.var);
// shrink and share from the previous state
auto shrinked_pre_state = pre_state->Slice(0, num_instances);
state_pre.ShareDataWith(shrinked_pre_state);
}
}
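Because the batch shrinks as shorter sequences run out, the pre-state of step t is simply the first num_instances rows of the step t-1 state, shared rather than copied (the Slice(0, num_instances) plus ShareDataWith above). A toy illustration of that narrowing view; the View struct is invented for the example:

// A borrowed row-major view; shrinking only narrows the view, no data moves.
struct View {
  const float* data;
  int rows, width;
};

View shrink(const View& prev_state, int num_instances) {
  return View{prev_state.data, num_instances, prev_state.width};
}

int main() {
  float prev[] = {1, 2, 3, 4, 5, 6};  // step t-1 state: 3 rows x 2 columns
  View prev_state{prev, 3, 2};
  View pre = shrink(prev_state, 2);   // step t only has 2 live sequences
  return pre.rows == 2 ? 0 : 1;
}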
void RNNAlgorithm::LinkInitialState(const rnn::StateAttr& state) {
// all the step_inputs' metas should be the same, just randomly select one // all the step_inputs' metas should be the same, just randomly select one
// and get the dyseq meta. // and get the dyseq meta.
const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()]; const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()];
size_t num_instances = auto& scope = cache_.GetScope(0);
step_inputs_[arg_.inlinks.front()].Read(step).dims()[0]; auto& state_pre = *cache_.GetTensor(scope, state.pre_var);
auto* pre_state = cache_.GetTensor(*cache_.scope, state.boot_var);
pre_state->mutable_data<float>(platform::CPUPlace());
// allocate state
state_pre.Resize(pre_state->dims());
state_pre.mutable_data<value_type>(platform::CPUPlace());
detail::ReorderInitialState(some_meta, *pre_state, &state_pre,
pre_state->place());
}
LoDTensor* pre_state{nullptr}; void RNNAlgorithm::ExportInitialStateGradient(const rnn::StateAttr& state) {
if (step == 0) { // all the step_inputs' metas should be the same, just randomly select one
pre_state = cache_.GetTensor(*cache_.scope, memory.boot_var); // and get the dyseq meta.
pre_state->mutable_data<float>(platform::CPUPlace()); const auto& some_meta = dy_seq_metas_[arg_.inlinks.front()];
// allocate memory auto& scope = cache_.GetScope(0);
state_pre.Resize(pre_state->dims());
state_pre.mutable_data<value_type>(platform::CPUPlace());
detail::ReorderBootState<value_type>(some_meta, *pre_state, &state_pre,
pre_state->place());
} else {
pre_state = cache_.GetTensor(cache_.GetScope(step - 1), memory.var);
}
// shrink and share from the previous state auto& state_pre = *cache_.GetTensor(scope, state.pre_var);
auto shrinked_pre_state = pre_state->Slice<value_type>(0, num_instances); auto& pre_state = *cache_.GetTensor(*cache_.scope, state.boot_var);
state_pre.ShareDataWith<value_type>(shrinked_pre_state); pre_state.Resize(state_pre.dims());
detail::RestoreInitialState(some_meta, state_pre, &pre_state,
pre_state.place());
} }
void DynamicRecurrentOp::ArgCache::Init( void RNNAlgorithm::ArgCache::Init(const rnn::ArgumentName& name,
const rnn::ArgumentName& name, const paddle::framework::OperatorBase& op, const paddle::framework::OperatorBase& op,
const paddle::framework::Scope& scope, rnn::Argument* arg) { const paddle::framework::Scope& scope,
platform::DeviceContext const* dev_ctx,
rnn::Argument* arg) {
this->scope = &scope; this->scope = &scope;
InitArgument(name, op, arg); InitArgument(name, op, arg);
CacheScopes(scope, *arg); CacheScopes(scope, *arg);
CacheInlinks(scope, arg->inlinks); CacheInlinks(scope, arg->inlinks);
CacheOutlinks(scope, arg->outlinks); CacheOutlinks(scope, arg->outlinks);
this->dev_ctx = dev_ctx;
} }
void DynamicRecurrentOp::ArgCache::InitArgument(const rnn::ArgumentName& name, void RNNAlgorithm::ArgCache::InitArgument(const rnn::ArgumentName& name,
const OperatorBase& op, const OperatorBase& op,
rnn::Argument* arg) { rnn::Argument* arg) {
rnn::InitArgument(name, arg, op, false /*is_grad*/); rnn::InitArgument(name, arg, op, false /*is_grad*/);
} }
void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope, void RNNAlgorithm::ArgCache::CacheScopes(const Scope& scope,
const rnn::Argument& arg) { const rnn::Argument& arg) {
auto scopes_var = scope.FindVar(arg.step_scopes); auto scopes_var = scope.FindVar(arg.step_scopes);
PADDLE_ENFORCE(scopes_var != nullptr, PADDLE_ENFORCE(scopes_var != nullptr,
"the step_scopes output argument [%s] should be created first " "the step_scopes output argument [%s] should be created first "
...@@ -290,45 +328,85 @@ void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope, ...@@ -290,45 +328,85 @@ void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope,
this->scopes = scopes_var->GetMutable<std::vector<Scope*>>(); this->scopes = scopes_var->GetMutable<std::vector<Scope*>>();
} }
void DynamicRecurrentOp::ArgCache::CacheInlinks( void RNNAlgorithm::ArgCache::CacheInlinks(
const Scope& scope, const std::vector<std::string>& names) { const Scope& scope, const std::vector<std::string>& names) {
for (auto name : names) { for (auto name : names) {
auto* var = GetVariable(scope, name); auto* var = GetVariable(scope, name);
inlinks[name] = var; inputs[name] = var;
} }
} }
void DynamicRecurrentOp::ArgCache::CacheOutlinks( void RNNAlgorithm::ArgCache::CacheOutlinks(
const Scope& scope, const std::vector<std::string>& names) { const Scope& scope, const std::vector<std::string>& names) {
for (auto name : names) { for (auto name : names) {
auto* var = GetVariable(scope, name); auto* var = GetVariable(scope, name);
outlinks[name] = var; outputs[name] = var;
} }
} }
Variable* DynamicRecurrentOp::ArgCache::GetVariable(const Scope& scope, Variable* RNNAlgorithm::ArgCache::GetVariable(const Scope& scope,
const std::string& name) { const std::string& name) {
auto* var = scope.FindVar(name); auto* var = scope.FindVar(name);
PADDLE_ENFORCE_NOT_NULL(var, "variable [%s] not exist in scope", name); PADDLE_ENFORCE_NOT_NULL(var, "variable [%s] not exist in scope", name);
return var; return var;
} }
LoDTensor* DynamicRecurrentOp::ArgCache::GetTensor( LoDTensor* RNNAlgorithm::ArgCache::GetTensor(const framework::Scope& scope,
const framework::Scope& scope, const std::string& name) { const std::string& name) {
auto* var = GetVariable(scope, name); auto* var = GetVariable(scope, name);
return var->GetMutable<LoDTensor>(); return var->GetMutable<LoDTensor>();
} }
const rnn::ArgumentName DynamicRecurrentOp::kArgName{ const std::array<rnn::ArgumentName, 2> RNNAlgorithm::kArgNames{
"step_net", "step_scopes", "inlinks", "outlinks", {rnn::ArgumentName{"step_unit", "step_scopes", "inputs", "outputs",
"memories", "pre_memories", "boot_memories"}; "states", "ex_states", "initial_states"},
rnn::ArgumentName{"step_unit", "step_scopes@GRAD", "outputs@GRAD",
"inputs@GRAD", "states", "ex_states",
"initial_states@GRAD"}}};
void DynamicRecurrentOp::Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const {
rnn.Run<RNNAlgorithm::ComputeMode::kForward>(
scope, *dynamic_cast<const OperatorBase*>(this), dev_ctx);
}
void DynamicRecurrentGradientOp::Run( void DynamicRecurrentGradientOp::Run(
const Scope& scope, const platform::DeviceContext& dev_ctx) const {} const Scope& scope, const platform::DeviceContext& dev_ctx) const {
rnn.Run<RNNAlgorithm::ComputeMode::kBackward>(
scope, *dynamic_cast<const OperatorBase*>(this), dev_ctx);
}
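Both operators delegate to a shared RNNAlgorithm object and select the forward or backward path through explicit specialization on ComputeMode. Stripped of the Paddle types, the dispatch pattern reduces to roughly the following standalone sketch (simplified; not the framework's actual classes):

#include <cstdio>

struct Algo {
  enum Mode { kForward = 0, kBackward = 1 };
  template <Mode M>
  void Run();  // specialized per compute mode below
};

template <>
void Algo::Run<Algo::kForward>() { std::printf("forward pass\n"); }

template <>
void Algo::Run<Algo::kBackward>() { std::printf("backward pass\n"); }

int main() {
  Algo rnn;
  rnn.Run<Algo::kForward>();   // what the forward operator's Run does
  rnn.Run<Algo::kBackward>();  // what the gradient operator's Run does
  return 0;
}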
class DynamicRecurrentOpProtoAndCheckerMaker
: public framework::OpProtoAndCheckerMaker {
public:
DynamicRecurrentOpProtoAndCheckerMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
const auto& name =
RNNAlgorithm::kArgNames[RNNAlgorithm::ComputeMode::kForward];
// inputs and outputs stored in proto
AddInput(name.inlinks,
"the inputs that need to be segmented for each step.")
.AsDuplicable();
AddInput(name.initial_states, "variables to initialize states.")
.AsDuplicable();
AddOutput(name.outlinks, "the outputs that need to be concatenated for all steps.")
.AsDuplicable();
AddOutput(name.step_scopes, "step scopes");
// Attributes stored in AttributeMap
AddAttr<std::vector<std::string>>(name.ex_states, "names of ex_states");
AddAttr<std::vector<std::string>>(name.states, "names of states");
AddComment("This is a RNN operator for varience-length sequences.");
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
REGISTER_OP_WITHOUT_GRADIENT( REGISTER_OP(dynamic_recurrent, paddle::operators::DynamicRecurrentOp,
dynamic_recurrent, paddle::operators::DynamicRecurrentOp, paddle::operators::DynamicRecurrentOpProtoAndCheckerMaker,
paddle::operators::DynamicRecurrentOpProtoAndCheckerMaker); dynamic_recurrent_grad,
paddle::operators::DynamicRecurrentGradientOp);
...@@ -27,47 +27,39 @@ ...@@ -27,47 +27,39 @@
namespace paddle { namespace paddle {
namespace operators { namespace operators {
class DynamicRecurrentOp : public framework::OperatorBase { class RNNAlgorithm {
public: public:
static const rnn::ArgumentName kArgName; enum ComputeMode { kForward = 0, kBackward = 1 };
static const std::array<rnn::ArgumentName, 2> kArgNames;
using value_type = float; using value_type = float;
DynamicRecurrentOp(const std::string& type, /*
const framework::VariableNameMap& inputs, * Different `Run` methods for forward and backward; `_` is just for template
const framework::VariableNameMap& outputs, * specialization.
const framework::AttributeMap& attrs) */
: OperatorBase(type, inputs, outputs, attrs) {} template <ComputeMode _>
void Run(const framework::Scope& scope, const framework::OperatorBase& op,
DynamicRecurrentOp(const DynamicRecurrentOp& o) const platform::DeviceContext& dev_ctx);
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
// TODO(yuyang18): Implement copy ctor well.
PADDLE_THROW("Not implemented");
}
void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const override;
/* /*
* Split the inputs (LoDTensors) into segments for each time step. * Split the inputs (LoDTensors) into segments for each time step.
*/ */
void SplitInputs() const; void SplitInputs();
/* /*
* Create step-scopes to store temporary outputs for each time step. * Create step-scopes to store temporary outputs for each time step.
*/ */
void CreateScopes() const; void CreateScopes();
/* /*
* Link TensorArray steps to the corresponding variables located in * Link TensorArray steps to the corresponding variables located in
* step-scopes. * step-scopes.
*/ */
void WriteStepInputs() const; void WriteStepInputs();
/* /*
* Write output of each step to the corresponding TensorArray. * Write output of each step to the corresponding TensorArray.
*/ */
void WriteStepOutputs() const; void WriteStepOutputs();
/* /*
* Initialize the states, each state will have a corresponding pre-state, * Initialize the states, each state will have a corresponding pre-state,
...@@ -75,54 +67,83 @@ class DynamicRecurrentOp : public framework::OperatorBase { ...@@ -75,54 +67,83 @@ class DynamicRecurrentOp : public framework::OperatorBase {
* pre-state in the first time step will be initialized with a zero tensor or * pre-state in the first time step will be initialized with a zero tensor or
* a tensor in the parent scope if one is provided. * a tensor in the parent scope if one is provided.
*/ */
void InitStates() const; void InitStates();
/* /*
* Create state variables for each time step. * Create state variables for each time step.
*/ */
void CreateState(const rnn::MemoryAttr& memory, size_t step) const; void CreateState(const rnn::StateAttr& state, size_t step);
/* /*
* Link pre-state variable in current scope to the state variable in the * Link pre-state variable in current scope to the state variable in the
* previous time step (scope). * previous time step (scope) by reference.
*/
void LinkState(const rnn::StateAttr& state, size_t step);
/*
* Link the pre-state of the first time step to the `boot-state` in parent's
* scope.
*/
void LinkInitialState(const rnn::StateAttr& state);
/*
* Copy the gradient from `pre-state` in the first step-scope to the
* `boot-state` in parent's scope.
*/
void ExportInitialStateGradient(const rnn::StateAttr& state);
/*
* Run the step unit over all time steps.
*/ */
void LinkState(const rnn::MemoryAttr& memory, size_t step) const; void RunSteps();
/* /*
* Concatenate the outputs of all time steps and generate a LoDTensor. * Concatenate the outputs of all time steps and generate a LoDTensor.
*/ */
void ConcatOutputs() const; void ConcatOutputs();
void SetComputeMode(ComputeMode mode) { mode_ = mode; }
bool IsForward() const { return mode_ == ComputeMode::kForward; }
bool IsBackward() const { return mode_ == ComputeMode::kBackward; }
/* /*
* set a stepnet that is created according to a RecurrentOp's stepnet. * set a step unit that is created according to a RecurrentOp's step unit.
*/ */
void SetStepNet(std::unique_ptr<OperatorBase> net) { void SetStepUnit(std::unique_ptr<framework::OperatorBase> step_unit) {
PADDLE_ENFORCE_NOT_NULL(net); PADDLE_ENFORCE_NOT_NULL(step_unit);
stepnet_ = std::move(net); step_unit_ = std::move(step_unit);
} }
const OperatorBase& GetStepNet() const { return *stepnet_; } const framework::OperatorBase& GetStepUnit() const { return *step_unit_; }
const framework::TensorArray& state(const std::string& name) const { const framework::TensorArray& state(const std::string& name) const {
return states_[name]; auto it = states_.find(name);
PADDLE_ENFORCE(it != states_.end());
return it->second;
} }
const framework::TensorArray& step_input(const std::string& name) const { const framework::TensorArray& step_input(const std::string& name) const {
return step_inputs_[name]; auto it = step_inputs_.find(name);
PADDLE_ENFORCE(it != step_inputs_.end());
return it->second;
} }
const framework::TensorArray& step_output(const std::string& name) const { const framework::TensorArray& step_output(const std::string& name) const {
return step_outputs_[name]; auto it = step_outputs_.find(name);
PADDLE_ENFORCE(it != step_outputs_.end());
return it->second;
} }
protected: protected:
struct ArgCache { struct ArgCache {
framework::Scope const* scope; framework::Scope const* scope;
std::vector<framework::Scope*>* scopes; std::vector<framework::Scope*>* scopes;
std::map<std::string, framework::Variable*> inlinks; std::map<std::string, framework::Variable*> inputs;
std::map<std::string, framework::Variable*> outlinks; std::map<std::string, framework::Variable*> outputs;
platform::DeviceContext const* dev_ctx;
size_t num_steps{0}; size_t num_steps{0};
void Init(const rnn::ArgumentName& name, const OperatorBase& op, void Init(const rnn::ArgumentName& name, const framework::OperatorBase& op,
const framework::Scope& scope, rnn::Argument* arg); const framework::Scope& scope,
platform::DeviceContext const* dev_ctx, rnn::Argument* arg);
framework::Scope& GetScope(size_t index) { framework::Scope& GetScope(size_t index) {
PADDLE_ENFORCE_LT(index, num_steps); PADDLE_ENFORCE_LT(index, num_steps);
...@@ -133,8 +154,8 @@ class DynamicRecurrentOp : public framework::OperatorBase { ...@@ -133,8 +154,8 @@ class DynamicRecurrentOp : public framework::OperatorBase {
const std::string& name); const std::string& name);
private: private:
void InitArgument(const rnn::ArgumentName& name, const OperatorBase& op, void InitArgument(const rnn::ArgumentName& name,
rnn::Argument* arg); const framework::OperatorBase& op, rnn::Argument* arg);
void CacheScopes(const framework::Scope& scope, const rnn::Argument& arg); void CacheScopes(const framework::Scope& scope, const rnn::Argument& arg);
void CacheInlinks(const framework::Scope& scope, void CacheInlinks(const framework::Scope& scope,
const std::vector<std::string>& names); const std::vector<std::string>& names);
...@@ -145,27 +166,49 @@ class DynamicRecurrentOp : public framework::OperatorBase { ...@@ -145,27 +166,49 @@ class DynamicRecurrentOp : public framework::OperatorBase {
}; };
private: private:
std::unique_ptr<OperatorBase> stepnet_; std::unique_ptr<framework::OperatorBase> step_unit_;
mutable std::map<std::string, framework::TensorArray> states_; std::map<std::string, framework::TensorArray> states_;
mutable std::map<std::string, framework::TensorArray> step_inputs_; std::map<std::string, framework::TensorArray> step_inputs_;
mutable std::map<std::string, framework::TensorArray> step_outputs_; std::map<std::string, framework::TensorArray> step_outputs_;
mutable std::map<std::string, std::vector<framework::DySeqMeta>> std::map<std::string, std::vector<framework::DySeqMeta>> dy_seq_metas_;
dy_seq_metas_; rnn::Argument arg_;
mutable rnn::Argument arg_; ArgCache cache_;
mutable ArgCache cache_; ComputeMode mode_{ComputeMode::kForward};
#ifdef PADDLE_WITH_TESTING #ifdef PADDLE_WITH_TESTING
friend class DynamicRecurrentOpTestHelper; // test forward
FRIEND_TEST(DynamicRecurrentOpTestHelper, SplitInputs); friend class RNNAlgorithmTestHelper;
FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateCache); FRIEND_TEST(RNNAlgorithmTestHelper, SplitInputs);
FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateScopes); FRIEND_TEST(RNNAlgorithmTestHelper, CreateCache);
FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepInputs); FRIEND_TEST(RNNAlgorithmTestHelper, CreateScopes);
FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepOutputs); FRIEND_TEST(RNNAlgorithmTestHelper, WriteStepInputs);
FRIEND_TEST(DynamicRecurrentOpTestHelper, InitStates); FRIEND_TEST(RNNAlgorithmTestHelper, WriteStepOutputs);
FRIEND_TEST(DynamicRecurrentOpTestHelper, ConcatOutputs); FRIEND_TEST(RNNAlgorithmTestHelper, InitStates);
FRIEND_TEST(RNNAlgorithmTestHelper, ConcatOutputs);
// TODO(superjom) test backward
#endif #endif
}; };
class DynamicRecurrentOp : public framework::OperatorBase {
public:
DynamicRecurrentOp(const std::string& type,
const framework::VariableNameMap& inputs,
const framework::VariableNameMap& outputs,
const framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
DynamicRecurrentOp(const DynamicRecurrentOp& o)
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
PADDLE_THROW("Not implemented");
}
void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const override;
mutable RNNAlgorithm rnn;
};
class DynamicRecurrentGradientOp : public framework::OperatorBase { class DynamicRecurrentGradientOp : public framework::OperatorBase {
public: public:
DynamicRecurrentGradientOp(const std::string& type, DynamicRecurrentGradientOp(const std::string& type,
...@@ -174,8 +217,16 @@ class DynamicRecurrentGradientOp : public framework::OperatorBase { ...@@ -174,8 +217,16 @@ class DynamicRecurrentGradientOp : public framework::OperatorBase {
const framework::AttributeMap& attrs) const framework::AttributeMap& attrs)
: OperatorBase(type, inputs, outputs, attrs) {} : OperatorBase(type, inputs, outputs, attrs) {}
DynamicRecurrentGradientOp(const DynamicRecurrentGradientOp& o)
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
PADDLE_THROW("Not implemented");
}
void Run(const framework::Scope& scope, void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const override; const platform::DeviceContext& dev_ctx) const override;
mutable RNNAlgorithm rnn;
}; };
} // namespace operators } // namespace operators
......
...@@ -43,16 +43,16 @@ LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims, ...@@ -43,16 +43,16 @@ LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims,
return tensor; return tensor;
} }
class DynamicRecurrentOpTestHelper : public ::testing::Test { class RNNAlgorithmTestHelper : public ::testing::Test {
protected: protected:
const rnn::ArgumentName argname = DynamicRecurrentOp::kArgName; const rnn::ArgumentName argname = RNNAlgorithm::kArgNames[0];
virtual void SetUp() override { virtual void SetUp() override {
CreateGlobalVariables(); CreateGlobalVariables();
auto op_desc = CreateOpDesc(); auto op_desc = CreateOpDesc();
op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr);
dop = dynamic_cast<DynamicRecurrentOp*>(op.get()); dop = &(dynamic_cast<DynamicRecurrentOp*>(op.get())->rnn);
InitCacheManually(); InitCacheManually();
InitStepNet(); InitStepNet();
} }
...@@ -63,20 +63,20 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test { ...@@ -63,20 +63,20 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test {
op_desc.set_type("dynamic_recurrent"); op_desc.set_type("dynamic_recurrent");
OpDescNewVar(argname.inlinks, {"in0"}, op_desc.add_inputs()); OpDescNewVar(argname.inlinks, {"in0"}, op_desc.add_inputs());
OpDescNewVar(argname.boot_memories, {"boot_mem"}, op_desc.add_inputs()); OpDescNewVar(argname.initial_states, {"boot_mem"}, op_desc.add_inputs());
OpDescNewVar(argname.step_scopes, {"step_scopes"}, op_desc.add_outputs()); OpDescNewVar(argname.step_scopes, {"step_scopes"}, op_desc.add_outputs());
OpDescNewVar(argname.outlinks, {"out0"}, op_desc.add_outputs()); OpDescNewVar(argname.outlinks, {"out0"}, op_desc.add_outputs());
// set pre-memories // set pre-states
auto pre_memories = op_desc.mutable_attrs()->Add(); auto pre_memories = op_desc.mutable_attrs()->Add();
pre_memories->set_name(argname.pre_memories); pre_memories->set_name(argname.ex_states);
pre_memories->set_type(paddle::framework::AttrType::STRINGS); pre_memories->set_type(paddle::framework::AttrType::STRINGS);
auto pre_memories_item = pre_memories->add_strings(); auto pre_memories_item = pre_memories->add_strings();
*pre_memories_item = "mem@pre"; *pre_memories_item = "mem@pre";
// set memories // set states
auto memories = op_desc.mutable_attrs()->Add(); auto memories = op_desc.mutable_attrs()->Add();
memories->set_name(argname.memories); memories->set_name(argname.states);
memories->set_type(paddle::framework::AttrType::STRINGS); memories->set_type(paddle::framework::AttrType::STRINGS);
auto memories_item = memories->add_strings(); auto memories_item = memories->add_strings();
*memories_item = "mem"; *memories_item = "mem";
...@@ -113,32 +113,33 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test { ...@@ -113,32 +113,33 @@ class DynamicRecurrentOpTestHelper : public ::testing::Test {
} }
void InitCacheManually() { void InitCacheManually() {
dop->cache_.Init(DynamicRecurrentOp::kArgName, *dop, scope, &dop->arg_); dop->cache_.Init(RNNAlgorithm::kArgNames[0], *op, scope, &device_context,
&dop->arg_);
} }
void InitStepNet() { void InitStepNet() {
std::unique_ptr<framework::OperatorBase> stepnet{new NetOp}; std::unique_ptr<framework::OperatorBase> stepnet{new NetOp};
dynamic_cast<NetOp*>(stepnet.get()) dynamic_cast<NetOp*>(stepnet.get())
->AppendOp(std::unique_ptr<TestOp>(new TestOp( ->AppendOp(std::unique_ptr<TestOp>(new TestOp(
"test", {{"inlinks", {"in0"}}, {"boot_memories", {"boot_mem"}}}, "test", {{"inputs", {"in0"}}, {"initial_states", {"boot_mem"}}},
{{"outlinks", {"out0"}}, {"step_scopes", {"step_scopes"}}}, {}))); {{"outputs", {"out0"}}, {"step_scopes", {"step_scopes"}}}, {})));
dop->SetStepNet(std::move(stepnet)); dop->SetStepUnit(std::move(stepnet));
} }
protected: protected:
DynamicRecurrentOp* dop; RNNAlgorithm* dop;
std::unique_ptr<framework::OperatorBase> op; std::unique_ptr<framework::OperatorBase> op;
paddle::platform::CPUDeviceContext device_context; paddle::platform::CPUDeviceContext device_context;
paddle::framework::Scope scope; paddle::framework::Scope scope;
}; };
TEST_F(DynamicRecurrentOpTestHelper, CreateCache) { TEST_F(RNNAlgorithmTestHelper, CreateCache) {
const rnn::Argument& arg = dop->arg_; const rnn::Argument& arg = dop->arg_;
ASSERT_EQ(arg.inlinks.size(), 1UL); ASSERT_EQ(arg.inlinks.size(), 1UL);
ASSERT_EQ(arg.outlinks.size(), 1UL); ASSERT_EQ(arg.outlinks.size(), 1UL);
} }
TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) { TEST_F(RNNAlgorithmTestHelper, SplitInputs) {
dop->SplitInputs(); dop->SplitInputs();
auto& in0_ta = dop->step_inputs_["in0"]; auto& in0_ta = dop->step_inputs_["in0"];
ASSERT_EQ(in0_ta.size(), 4UL); ASSERT_EQ(in0_ta.size(), 4UL);
...@@ -153,14 +154,14 @@ TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) { ...@@ -153,14 +154,14 @@ TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) {
EXPECT_EQ(batch3.dims()[0], 1); EXPECT_EQ(batch3.dims()[0], 1);
} }
TEST_F(DynamicRecurrentOpTestHelper, CreateScopes) { TEST_F(RNNAlgorithmTestHelper, CreateScopes) {
dop->SplitInputs(); dop->SplitInputs();
dop->CreateScopes(); dop->CreateScopes();
ASSERT_EQ(dop->cache_.num_steps, 4UL); ASSERT_EQ(dop->cache_.num_steps, 4UL);
ASSERT_EQ(dop->cache_.scopes->size(), 4UL); ASSERT_EQ(dop->cache_.scopes->size(), 4UL);
} }
TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) { TEST_F(RNNAlgorithmTestHelper, WriteStepInputs) {
dop->SplitInputs(); dop->SplitInputs();
dop->CreateScopes(); dop->CreateScopes();
dop->WriteStepInputs(); dop->WriteStepInputs();
...@@ -173,7 +174,7 @@ TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) { ...@@ -173,7 +174,7 @@ TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) {
} }
} }
TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) { TEST_F(RNNAlgorithmTestHelper, WriteStepOutputs) {
dop->SplitInputs(); dop->SplitInputs();
dop->CreateScopes(); dop->CreateScopes();
dop->WriteStepInputs(); dop->WriteStepInputs();
...@@ -187,11 +188,12 @@ TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) { ...@@ -187,11 +188,12 @@ TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) {
} }
} }
TEST_F(DynamicRecurrentOpTestHelper, ConcatOutputs) { TEST_F(RNNAlgorithmTestHelper, ConcatOutputs) {
// Let's leave this test to the Python unittest. // Let's leave this test to the Python unittest.
} }
TEST_F(DynamicRecurrentOpTestHelper, InitStates) { TEST_F(RNNAlgorithmTestHelper, InitStates) {
dop->SetComputeMode(RNNAlgorithm::ComputeMode::kForward);
dop->SplitInputs(); dop->SplitInputs();
dop->CreateScopes(); dop->CreateScopes();
dop->WriteStepInputs(); dop->WriteStepInputs();
...@@ -208,12 +210,6 @@ TEST_F(DynamicRecurrentOpTestHelper, InitStates) { ...@@ -208,12 +210,6 @@ TEST_F(DynamicRecurrentOpTestHelper, InitStates) {
auto* boot_state = scope.FindVar("boot_mem"); auto* boot_state = scope.FindVar("boot_mem");
ASSERT_TRUE(boot_state != nullptr); ASSERT_TRUE(boot_state != nullptr);
if (step == 0) {
// check pre_state is a reference of boot_state
ASSERT_EQ(boot_state->Get<LoDTensor>().data<float>(),
pre_state->Get<LoDTensor>().data<float>());
}
} }
} }
......
...@@ -108,7 +108,7 @@ void ElementwiseCompute(const framework::ExecutionContext& ctx) { ...@@ -108,7 +108,7 @@ void ElementwiseCompute(const framework::ExecutionContext& ctx) {
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
"Rank of first input must >= rank of second input.") "Rank of first input must >= rank of second input.")
if (x_dims == y_dims || product(y_dims) == 1) { if (x_dims == y_dims) {
functor f; functor f;
f.template Run<Place, T>(x, y, z, ctx); f.template Run<Place, T>(x, y, z, ctx);
return; return;
...@@ -174,12 +174,6 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { ...@@ -174,12 +174,6 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) {
return; return;
} }
if (product(y_dims) == 1) {
functor1 f;
f(place, x, y, out, dx, dy, dout);
return;
}
int axis = ctx.Attr<int>("axis"); int axis = ctx.Attr<int>("axis");
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
......
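With the scalar-y fast path removed above, y must either match x's shape exactly or be broadcast along the `axis` attribute, i.e. x is viewed as [pre, n, post] where n covers y's dimensions. A hedged standalone illustration of that broadcast rule (the function name and flattened layout are assumptions, not the Paddle kernels):

#include <cstdio>
#include <vector>

// Add y onto x with y broadcast along `axis`: x is treated as [pre, n, post]
// and every x element reuses the y element addressed by the middle index.
std::vector<float> broadcast_add(const std::vector<float>& x,
                                 const std::vector<int>& x_dims,
                                 const std::vector<float>& y,
                                 const std::vector<int>& y_dims, int axis) {
  int pre = 1, n = 1, post = 1;
  for (int i = 0; i < axis; ++i) pre *= x_dims[i];
  for (size_t i = 0; i < y_dims.size(); ++i) n *= y_dims[i];
  for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) post *= x_dims[i];
  std::vector<float> z(x.size());
  for (int p = 0; p < pre; ++p)
    for (int j = 0; j < n; ++j)
      for (int q = 0; q < post; ++q) {
        int idx = (p * n + j) * post + q;
        z[idx] = x[idx] + y[j];
      }
  return z;
}

int main() {
  std::vector<float> x = {1, 2, 3, 4, 5, 6};  // shape [2, 3]
  std::vector<float> y = {10, 20, 30};        // shape [3], broadcast at axis 1
  auto z = broadcast_add(x, {2, 3}, y, {3}, /*axis=*/1);
  std::printf("z[3]=%.0f\n", z[3]);  // 4 + 10 = 14
  return 0;
}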
...@@ -47,7 +47,7 @@ class FeedOp : public framework::OperatorBase { ...@@ -47,7 +47,7 @@ class FeedOp : public framework::OperatorBase {
auto &feed_list = feed_var->Get<framework::FeedFetchList>(); auto &feed_list = feed_var->Get<framework::FeedFetchList>();
auto &feed_item = feed_list.at(static_cast<size_t>(col)); auto &feed_item = feed_list.at(static_cast<size_t>(col));
auto *out_item = out_var->GetMutable<framework::FeedFetchType>(); auto *out_item = out_var->GetMutable<framework::FeedFetchType>();
out_item->CopyFromTensor(feed_item, dev_ctx.GetPlace(), dev_ctx); out_item->CopyFrom(feed_item, dev_ctx.GetPlace(), dev_ctx);
out_item->set_lod(feed_item.lod()); out_item->set_lod(feed_item.lod());
} }
}; };
......
...@@ -51,7 +51,7 @@ class FetchOp : public framework::OperatorBase { ...@@ -51,7 +51,7 @@ class FetchOp : public framework::OperatorBase {
// FIXME(yuyang18): Should we assume the fetch operator always generate // FIXME(yuyang18): Should we assume the fetch operator always generate
// CPU outputs? // CPU outputs?
dst_item.CopyFromTensor(src_item, platform::CPUPlace(), dev_ctx); dst_item.CopyFrom(src_item, platform::CPUPlace(), dev_ctx);
VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name;
} }
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/increment_op.h"
namespace paddle {
namespace operators {
class IncrementOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of IncrementOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of IncrementOp should not be null.");
ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
ctx->ShareLoD("X", /*->*/ "Out");
}
};
template <typename AttrType>
class IncrementOpMaker : public framework::OpProtoAndCheckerMaker {
public:
IncrementOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "(Tensor) The input tensor of increment operator");
AddOutput("Out", "(Tensor) The output tensor of increment operator.");
AddComment(R"DOC(Increment operator
The equation is: Out = X + step
)DOC");
AddAttr<AttrType>("step",
"The step size by which the "
"input tensor will be incremented.")
.SetDefault(1.0);
}
};
class IncrementGradOpMaker : public framework::SingleGradOpDescMaker {
public:
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
std::unique_ptr<framework::OpDescBind> Apply() const override {
auto *grad_op = new framework::OpDescBind();
grad_op->SetType("scale");
grad_op->SetInput("X", OutputGrad("Out"));
grad_op->SetOutput("Out", InputGrad("X"));
grad_op->SetAttr("scale", 1.0f);
return std::unique_ptr<framework::OpDescBind>(grad_op);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker<float>,
ops::IncrementGradOpMaker);
REGISTER_OP_CPU_KERNEL(increment,
ops::IncrementKernel<paddle::platform::CPUPlace, float>);
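Since Out = X + step is an element-wise shift, dOut/dX is the identity, which is exactly why IncrementGradOpMaker above emits a `scale` op with scale = 1.0 that routes grad(Out) straight into grad(X). A trivial scalar sketch of the forward/backward pair:

#include <cstdio>

int main() {
  float x = 2.f, step = 1.f;
  float out = x + step;      // forward: 3.0
  float d_out = 1.f;         // some upstream gradient
  float d_x = 1.0f * d_out;  // backward: scale(d_out, 1.0)
  std::printf("out=%.1f d_x=%.1f\n", out, d_x);
  return 0;
}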
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/increment_op.h"
REGISTER_OP_GPU_KERNEL(
increment,
paddle::operators::IncrementKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename Place, typename T, typename AttrType = T>
class IncrementKernel : public framework::OpKernel<T> {
public:
virtual void Compute(const framework::ExecutionContext& context) const {
auto* tensor = context.Output<framework::Tensor>("Out");
auto* in = context.Input<framework::Tensor>("X");
tensor->mutable_data<T>(in->place());
auto step = static_cast<T>(context.Attr<AttrType>("step"));
auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
auto eigen_in = framework::EigenVector<T>::Flatten(*in);
auto& place = context.GetEigenDevice<Place>();
eigen_out.device(place) = eigen_in + step;
}
};
} // namespace operators
} // namespace paddle
...@@ -64,7 +64,7 @@ void testIm2col() { ...@@ -64,7 +64,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
input.CopyFrom<float>(input_tmp, *place, *context); input.CopyFrom(input_tmp, *place, *context);
} }
output_cfo.mutable_data<float>( output_cfo.mutable_data<float>(
{1, filter_size, filter_size, output_height, output_width}, *place); {1, filter_size, filter_size, output_height, output_width}, *place);
...@@ -85,8 +85,7 @@ void testIm2col() { ...@@ -85,8 +85,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_cfo_ptr = output_cfo.data<float>(); out_cfo_ptr = output_cfo.data<float>();
} else { } else {
output_tmp.CopyFrom<float>(output_cfo, paddle::platform::CPUPlace(), output_tmp.CopyFrom(output_cfo, paddle::platform::CPUPlace(), *context);
*context);
out_cfo_ptr = output_tmp.data<float>(); out_cfo_ptr = output_tmp.data<float>();
} }
EXPECT_EQ(out_cfo_ptr[0], 0); EXPECT_EQ(out_cfo_ptr[0], 0);
...@@ -102,8 +101,7 @@ void testIm2col() { ...@@ -102,8 +101,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_ocf_ptr = output_ocf.data<float>(); out_ocf_ptr = output_ocf.data<float>();
} else { } else {
output_tmp.CopyFrom<float>(output_ocf, paddle::platform::CPUPlace(), output_tmp.CopyFrom(output_ocf, paddle::platform::CPUPlace(), *context);
*context);
out_ocf_ptr = output_tmp.data<float>(); out_ocf_ptr = output_tmp.data<float>();
} }
EXPECT_EQ(out_ocf_ptr[0], 0); EXPECT_EQ(out_ocf_ptr[0], 0);
......
...@@ -16,15 +16,15 @@ TEST(math_function, notrans_mul_trans) { ...@@ -16,15 +16,15 @@ TEST(math_function, notrans_mul_trans) {
auto* gpu_place = new paddle::platform::GPUPlace(0); auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::CUDADeviceContext context(*gpu_place); paddle::platform::CUDADeviceContext context(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place, context); input1_gpu.CopyFrom(input1, *gpu_place, context);
input2_gpu.CopyFrom<float>(input1, *gpu_place, context); input2_gpu.CopyFrom(input1, *gpu_place, context);
out_gpu.mutable_data<float>({2, 2}, *gpu_place); out_gpu.mutable_data<float>({2, 2}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>( paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);
out.CopyFrom<float>(out_gpu, *cpu_place, context); out.CopyFrom(out_gpu, *cpu_place, context);
float* out_ptr = out.data<float>(); float* out_ptr = out.data<float>();
context.Wait(); context.Wait();
...@@ -50,15 +50,15 @@ TEST(math_function, trans_mul_notrans) { ...@@ -50,15 +50,15 @@ TEST(math_function, trans_mul_notrans) {
auto* gpu_place = new paddle::platform::GPUPlace(0); auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::CUDADeviceContext context(*gpu_place); paddle::platform::CUDADeviceContext context(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place, context); input1_gpu.CopyFrom(input1, *gpu_place, context);
input2_gpu.CopyFrom<float>(input1, *gpu_place, context); input2_gpu.CopyFrom(input1, *gpu_place, context);
out_gpu.mutable_data<float>({3, 3}, *gpu_place); out_gpu.mutable_data<float>({3, 3}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>( paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);
out.CopyFrom<float>(out_gpu, *cpu_place, context); out.CopyFrom(out_gpu, *cpu_place, context);
float* out_ptr = out.data<float>(); float* out_ptr = out.data<float>();
context.Wait(); context.Wait();
...@@ -99,9 +99,9 @@ TEST(math_function, gemm_notrans_cublas) { ...@@ -99,9 +99,9 @@ TEST(math_function, gemm_notrans_cublas) {
auto* gpu_place = new paddle::platform::GPUPlace(0); auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::CUDADeviceContext context(*gpu_place); paddle::platform::CUDADeviceContext context(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place, context); input1_gpu.CopyFrom(input1, *gpu_place, context);
input2_gpu.CopyFrom<float>(input2, *gpu_place, context); input2_gpu.CopyFrom(input2, *gpu_place, context);
input3_gpu.CopyFrom<float>(input3, *gpu_place, context); input3_gpu.CopyFrom(input3, *gpu_place, context);
float* a = input1_gpu.data<float>(); float* a = input1_gpu.data<float>();
float* b = input2_gpu.data<float>(); float* b = input2_gpu.data<float>();
float* c = input3_gpu.mutable_data<float>(*gpu_place); float* c = input3_gpu.mutable_data<float>(*gpu_place);
...@@ -109,7 +109,7 @@ TEST(math_function, gemm_notrans_cublas) { ...@@ -109,7 +109,7 @@ TEST(math_function, gemm_notrans_cublas) {
paddle::operators::math::gemm<paddle::platform::GPUPlace, float>( paddle::operators::math::gemm<paddle::platform::GPUPlace, float>(
context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4);
input3.CopyFrom<float>(input3_gpu, *cpu_place, context); input3.CopyFrom(input3_gpu, *cpu_place, context);
// numpy code: // numpy code:
// a = np.arange(6).reshape(2, 3) // a = np.arange(6).reshape(2, 3)
...@@ -154,9 +154,9 @@ TEST(math_function, gemm_trans_cublas) { ...@@ -154,9 +154,9 @@ TEST(math_function, gemm_trans_cublas) {
auto* gpu_place = new paddle::platform::GPUPlace(0); auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::CUDADeviceContext context(*gpu_place); paddle::platform::CUDADeviceContext context(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place, context); input1_gpu.CopyFrom(input1, *gpu_place, context);
input2_gpu.CopyFrom<float>(input2, *gpu_place, context); input2_gpu.CopyFrom(input2, *gpu_place, context);
input3_gpu.CopyFrom<float>(input3, *gpu_place, context); input3_gpu.CopyFrom(input3, *gpu_place, context);
float* a = input1_gpu.data<float>(); float* a = input1_gpu.data<float>();
float* b = input2_gpu.data<float>(); float* b = input2_gpu.data<float>();
float* c = input3_gpu.mutable_data<float>(*gpu_place); float* c = input3_gpu.mutable_data<float>(*gpu_place);
...@@ -164,7 +164,7 @@ TEST(math_function, gemm_trans_cublas) { ...@@ -164,7 +164,7 @@ TEST(math_function, gemm_trans_cublas) {
paddle::operators::math::gemm<paddle::platform::GPUPlace, float>( paddle::operators::math::gemm<paddle::platform::GPUPlace, float>(
context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4);
input3.CopyFrom<float>(input3_gpu, *cpu_place, context); input3.CopyFrom(input3_gpu, *cpu_place, context);
context.Wait(); context.Wait();
EXPECT_EQ(input3_ptr[0], 0); EXPECT_EQ(input3_ptr[0], 0);
......
...@@ -67,7 +67,7 @@ TEST(selected_rows_functor, gpu_add) { ...@@ -67,7 +67,7 @@ TEST(selected_rows_functor, gpu_add) {
EXPECT_EQ(out_rows[6], 9); EXPECT_EQ(out_rows[6], 9);
Tensor out_cpu; Tensor out_cpu;
out_cpu.CopyFrom<float>(*out_value, cpu_place, ctx); out_cpu.CopyFrom(*out_value, cpu_place, ctx);
ctx.Wait(); ctx.Wait();
auto* out_cpu_data = out_cpu.data<float>(); auto* out_cpu_data = out_cpu.data<float>();
...@@ -94,7 +94,7 @@ TEST(selected_rows_functor, gpu_add) { ...@@ -94,7 +94,7 @@ TEST(selected_rows_functor, gpu_add) {
add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); add_tensor_functor(ctx, *output, *tensor1, tensor2.get());
Tensor tensor2_cpu; Tensor tensor2_cpu;
tensor2_cpu.CopyFrom<float>(*tensor2, cpu_place, ctx); tensor2_cpu.CopyFrom(*tensor2, cpu_place, ctx);
ctx.Wait(); ctx.Wait();
auto* tensor2_cpu_data = tensor2_cpu.data<float>(); auto* tensor2_cpu_data = tensor2_cpu.data<float>();
......
...@@ -78,7 +78,7 @@ void testVol2col() { ...@@ -78,7 +78,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
input.CopyFrom<float>(input_tmp, *place, *context); input.CopyFrom(input_tmp, *place, *context);
} }
output.mutable_data<float>({1, filter_size, filter_size, filter_size, output.mutable_data<float>({1, filter_size, filter_size, filter_size,
output_depth, output_height, output_width}, output_depth, output_height, output_width},
...@@ -93,7 +93,7 @@ void testVol2col() { ...@@ -93,7 +93,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_cfo_ptr = output.data<float>(); out_cfo_ptr = output.data<float>();
} else { } else {
output_tmp.CopyFrom<float>(output, paddle::platform::CPUPlace(), *context); output_tmp.CopyFrom(output, paddle::platform::CPUPlace(), *context);
out_cfo_ptr = output_tmp.data<float>(); out_cfo_ptr = output_tmp.data<float>();
} }
...@@ -107,7 +107,7 @@ void testVol2col() { ...@@ -107,7 +107,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
input.CopyFrom<float>(input_tmp, *place, *context); input.CopyFrom(input_tmp, *place, *context);
} }
paddle::operators::math::Col2VolFunctor<Place, float> col2vol; paddle::operators::math::Col2VolFunctor<Place, float> col2vol;
...@@ -118,7 +118,7 @@ void testVol2col() { ...@@ -118,7 +118,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
in_ptr = input.data<float>(); in_ptr = input.data<float>();
} else { } else {
input_tmp.CopyFrom<float>(input, paddle::platform::CPUPlace(), *context); input_tmp.CopyFrom(input, paddle::platform::CPUPlace(), *context);
in_ptr = input_tmp.data<float>(); in_ptr = input_tmp.data<float>();
} }
......
...@@ -46,7 +46,7 @@ class MatMulKernel : public framework::OpKernel<T> { ...@@ -46,7 +46,7 @@ class MatMulKernel : public framework::OpKernel<T> {
template <typename T> template <typename T>
inline Tensor Reshape(const Tensor& input, const DDim& dims) { inline Tensor Reshape(const Tensor& input, const DDim& dims) {
Tensor output; Tensor output;
output.ShareDataWith<T>(input); output.ShareDataWith(input);
output.Resize(dims); output.Resize(dims);
return output; return output;
} }
...@@ -56,7 +56,7 @@ inline Tensor Reshape(const Tensor& input, const DDim& dims) { ...@@ -56,7 +56,7 @@ inline Tensor Reshape(const Tensor& input, const DDim& dims) {
template <typename T> template <typename T>
Tensor CombineBatchAndM(const Tensor& input) { Tensor CombineBatchAndM(const Tensor& input) {
Tensor output; Tensor output;
output.ShareDataWith<T>(input); output.ShareDataWith(input);
auto in_dims = input.dims(); auto in_dims = input.dims();
if (in_dims.size() == 3) { if (in_dims.size() == 3) {
std::vector<int64_t> out_dims = {in_dims[0] * in_dims[1], in_dims[2]}; std::vector<int64_t> out_dims = {in_dims[0] * in_dims[1], in_dims[2]};
...@@ -80,7 +80,7 @@ Tensor CombineBatchAndN(const framework::ExecutionContext& context, ...@@ -80,7 +80,7 @@ Tensor CombineBatchAndN(const framework::ExecutionContext& context,
std::vector<int64_t> out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; std::vector<int64_t> out_dims = {in_dims[1], in_dims[0] * in_dims[2]};
output.Resize(make_ddim(out_dims)); output.Resize(make_ddim(out_dims));
} else { } else {
output.ShareDataWith<T>(input); output.ShareDataWith(input);
} }
return output; return output;
} }
......
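The Reshape/CombineBatchAndM helpers above only re-interpret existing data (ShareDataWith plus Resize), they do not copy. A tiny NumPy analogue of the CombineBatchAndM reshape, purely as an illustration and not PaddlePaddle API:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)   # [batch, M, K]
combined = x.reshape(2 * 3, 4)                          # CombineBatchAndM: [batch * M, K]
assert np.shares_memory(x, combined)                    # same buffer, analogous to ShareDataWith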
...@@ -75,12 +75,17 @@ class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -75,12 +75,17 @@ class MomentumOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("VelocityOut", "(Tensor) Output updated velocity"); AddOutput("VelocityOut", "(Tensor) Output updated velocity");
AddAttr<float>("mu", "(float) Momentum coefficient"); AddAttr<float>("mu", "(float) Momentum coefficient");
AddAttr<bool>("useNesterov", "(bool) Use Nesterov Momentum")
.SetDefault(false);
AddComment(R"DOC( AddComment(R"DOC(
Momentum Algorithm (momentum). Momentum Algorithm with a flag for Nesterov Momentum (momentum).
velocity = mu * velocity + gradient velocity = mu * velocity + gradient
param = param - learning_rate * velocity if (use_nesterov):
param = param - gradient * learning_rate + mu * velocity * learning_rate
else:
param = param - learning_rate * velocity
)DOC"); )DOC");
} }
......
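A minimal NumPy sketch of the two update rules described in the DOC string above (plain illustration, not the Paddle kernel; variable names are placeholders):

import numpy as np

def momentum_step(param, grad, velocity, lr, mu, use_nesterov=False):
    velocity = mu * velocity + grad                          # velocity = mu * velocity + gradient
    if use_nesterov:
        param = param - grad * lr + mu * velocity * lr       # Nesterov variant
    else:
        param = param - lr * velocity                        # plain momentum
    return param, velocity

p, v = np.array([1.0]), np.array([0.0])
p, v = momentum_step(p, np.array([0.1]), v, lr=0.01, mu=0.9, use_nesterov=True)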
...@@ -34,6 +34,7 @@ class MomentumOpKernel : public framework::OpKernel<T> { ...@@ -34,6 +34,7 @@ class MomentumOpKernel : public framework::OpKernel<T> {
velocity_out->mutable_data<T>(ctx.GetPlace()); velocity_out->mutable_data<T>(ctx.GetPlace());
float mu = ctx.Attr<float>("mu"); float mu = ctx.Attr<float>("mu");
bool use_nesterov = ctx.Attr<bool>("useNesterov");
auto p_out = framework::EigenVector<T>::Flatten(*param_out); auto p_out = framework::EigenVector<T>::Flatten(*param_out);
auto v_out = framework::EigenVector<T>::Flatten(*velocity_out); auto v_out = framework::EigenVector<T>::Flatten(*velocity_out);
...@@ -46,8 +47,14 @@ class MomentumOpKernel : public framework::OpKernel<T> { ...@@ -46,8 +47,14 @@ class MomentumOpKernel : public framework::OpKernel<T> {
auto place = ctx.GetEigenDevice<Place>(); auto place = ctx.GetEigenDevice<Place>();
Eigen::DSizes<int, 1> grad_dsize(grad->numel()); Eigen::DSizes<int, 1> grad_dsize(grad->numel());
v_out.device(place) = v * mu + g; v_out.device(place) = v * mu + g;
p_out.device(place) = p - lr.broadcast(grad_dsize) * v_out; if (use_nesterov) {
p_out.device(place) = p - g * lr.broadcast(grad_dsize) +
v_out * mu * lr.broadcast(grad_dsize);
} else {
p_out.device(place) = p - lr.broadcast(grad_dsize) * v_out;
}
} }
}; };
......
...@@ -36,12 +36,12 @@ class MulKernel : public framework::OpKernel<T> { ...@@ -36,12 +36,12 @@ class MulKernel : public framework::OpKernel<T> {
Tensor* z = context.Output<Tensor>("Out"); Tensor* z = context.Output<Tensor>("Out");
const Tensor x_matrix = const Tensor x_matrix =
x->dims().size() > 2 x->dims().size() > 2
? framework::ReshapeToMatrix<T>( ? framework::ReshapeToMatrix(
*x, context.template Attr<int>("x_num_col_dims")) *x, context.template Attr<int>("x_num_col_dims"))
: *x; : *x;
const Tensor y_matrix = const Tensor y_matrix =
y->dims().size() > 2 y->dims().size() > 2
? framework::ReshapeToMatrix<T>( ? framework::ReshapeToMatrix(
*y, context.template Attr<int>("y_num_col_dims")) *y, context.template Attr<int>("y_num_col_dims"))
: *y; : *y;
...@@ -59,30 +59,30 @@ class MulGradKernel : public framework::OpKernel<T> { ...@@ -59,30 +59,30 @@ class MulGradKernel : public framework::OpKernel<T> {
int y_num_col_dims = ctx.template Attr<int>("y_num_col_dims"); int y_num_col_dims = ctx.template Attr<int>("y_num_col_dims");
const Tensor* x = ctx.Input<Tensor>("X"); const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y"); const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor x_matrix = const Tensor x_matrix = x->dims().size() > 2
x->dims().size() > 2 ? framework::ReshapeToMatrix<T>(*x, x_num_col_dims) ? framework::ReshapeToMatrix(*x, x_num_col_dims)
: *x; : *x;
const Tensor y_matrix = const Tensor y_matrix = y->dims().size() > 2
y->dims().size() > 2 ? framework::ReshapeToMatrix<T>(*y, y_num_col_dims) ? framework::ReshapeToMatrix(*y, y_num_col_dims)
: *y; : *y;
const Tensor* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); const Tensor* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X")); Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
Tensor* dy = ctx.Output<Tensor>(framework::GradVarName("Y")); Tensor* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
if (dx) { if (dx) {
dx->mutable_data<T>(ctx.GetPlace()); dx->mutable_data<T>(ctx.GetPlace());
Tensor dx_matrix = dx->dims().size() > 2 ? framework::ReshapeToMatrix<T>( Tensor dx_matrix = dx->dims().size() > 2
*dx, x_num_col_dims) ? framework::ReshapeToMatrix(*dx, x_num_col_dims)
: *dx; : *dx;
// dx = dout * y'. dx: M x K, dout : M x N, y : K x N // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
math::matmul<Place, T>(ctx.device_context(), *dout, false, y_matrix, true, math::matmul<Place, T>(ctx.device_context(), *dout, false, y_matrix, true,
1, &dx_matrix, 0); 1, &dx_matrix, 0);
} }
if (dy) { if (dy) {
dy->mutable_data<T>(ctx.GetPlace()); dy->mutable_data<T>(ctx.GetPlace());
Tensor dy_matrix = dy->dims().size() > 2 ? framework::ReshapeToMatrix<T>( Tensor dy_matrix = dy->dims().size() > 2
*dy, y_num_col_dims) ? framework::ReshapeToMatrix(*dy, y_num_col_dims)
: *dy; : *dy;
// dy = x' * dout. dy K x N, dout : M x N, x : M x K // dy = x' * dout. dy K x N, dout : M x N, x : M x K
math::matmul<Place, T>(ctx.device_context(), x_matrix, true, *dout, false, math::matmul<Place, T>(ctx.device_context(), x_matrix, true, *dout, false,
1, &dy_matrix, 0); 1, &dy_matrix, 0);
......
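For reference, a hedged NumPy analogue of how ReshapeToMatrix with x_num_col_dims is used in the kernels above: the first num_col_dims dimensions are flattened into the matrix height and the remaining dimensions into its width (my reading of the hunk; names are illustrative):

import numpy as np

def reshape_to_matrix(x, num_col_dims):
    height = int(np.prod(x.shape[:num_col_dims]))
    return x.reshape(height, -1)

x = np.zeros((2, 3, 4, 5), dtype=np.float32)
print(reshape_to_matrix(x, 2).shape)   # (6, 20): rows from dims [2, 3], cols from dims [4, 5]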
...@@ -33,8 +33,7 @@ class MultiplexGPUKernel : public framework::OpKernel<T> { ...@@ -33,8 +33,7 @@ class MultiplexGPUKernel : public framework::OpKernel<T> {
auto cols = ins[0]->numel() / rows; auto cols = ins[0]->numel() / rows;
// copy index to cpu // copy index to cpu
Tensor index_t_cpu; Tensor index_t_cpu;
index_t_cpu.CopyFrom<int32_t>(*ids, platform::CPUPlace(), index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context());
ctx.device_context());
auto* index = index_t_cpu.data<int32_t>(); auto* index = index_t_cpu.data<int32_t>();
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>( auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context()) ctx.device_context())
...@@ -71,8 +70,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel<T> { ...@@ -71,8 +70,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel<T> {
auto cols = ins[0]->numel() / rows; auto cols = ins[0]->numel() / rows;
// copy index to cpu // copy index to cpu
Tensor index_t_cpu; Tensor index_t_cpu;
index_t_cpu.CopyFrom<int32_t>(*ids, platform::CPUPlace(), index_t_cpu.CopyFrom(*ids, platform::CPUPlace(), ctx.device_context());
ctx.device_context());
auto* index = index_t_cpu.data<int32_t>(); auto* index = index_t_cpu.data<int32_t>();
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>( auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
......
...@@ -42,7 +42,7 @@ void RecurrentAlgorithm::Run(const Scope& scope, ...@@ -42,7 +42,7 @@ void RecurrentAlgorithm::Run(const Scope& scope,
for (size_t step_id = 0; step_id < seq_len; step_id++) { for (size_t step_id = 0; step_id < seq_len; step_id++) {
if (step_id > 0) { if (step_id > 0) {
rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1); rnn::LinkMemories(step_scopes, arg_->states, step_id, -1);
} }
(*stepnet_)->Run(*step_scopes[step_id], dev_ctx); (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
} }
...@@ -59,7 +59,8 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope, ...@@ -59,7 +59,8 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope,
// Now all variables in scope must be created outside of op. // Now all variables in scope must be created outside of op.
PADDLE_ENFORCE_NOT_NULL(stepnet_); PADDLE_ENFORCE_NOT_NULL(stepnet_);
PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs"); PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(),
"step_unit_ op has no outputs");
if (seq_len > step_scopes->size()) { if (seq_len > step_scopes->size()) {
for (size_t i = step_scopes->size(); i < seq_len; ++i) { for (size_t i = step_scopes->size(); i < seq_len; ++i) {
...@@ -86,7 +87,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope, ...@@ -86,7 +87,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope,
} }
void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
for (auto& attr : arg_->memories) { for (auto& attr : arg_->states) {
auto* pre_mem = step_scope->Var(attr.pre_var)->GetMutable<LoDTensor>(); auto* pre_mem = step_scope->Var(attr.pre_var)->GetMutable<LoDTensor>();
PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
"memory [%s]'s boot variable [%s] not exists", attr.var, "memory [%s]'s boot variable [%s] not exists", attr.var,
...@@ -95,17 +96,17 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { ...@@ -95,17 +96,17 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope) const {
step_scope->FindVar(attr.boot_var)->GetMutable<LoDTensor>(); step_scope->FindVar(attr.boot_var)->GetMutable<LoDTensor>();
pre_mem->Resize(boot_mem->dims()); pre_mem->Resize(boot_mem->dims());
PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2); PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2);
pre_mem->ShareDataWith<float>(*boot_mem); pre_mem->ShareDataWith(*boot_mem);
} }
} }
const rnn::ArgumentName RecurrentOp::kArgName{ const rnn::ArgumentName RecurrentOp::kArgName{
"step_net", "step_scopes", "inlinks", "outlinks", "step_net", "step_scopes", "inputs", "outputs",
"memories", "pre_memories", "boot_memories"}; "states", "ex_states", "initial_states"};
const rnn::ArgumentName RecurrentGradientOp::kArgName{ const rnn::ArgumentName RecurrentGradientOp::kArgName{
"step_net", "step_scopes@GRAD", "outlinks@GRAD", "inlinks@GRAD", "step_net", "step_scopes@GRAD", "outputs@GRAD", "inputs@GRAD",
"memories", "pre_memories", "boot_memories@GRAD"}; "states", "ex_states", "initial_states@GRAD"};
RecurrentOp::RecurrentOp(const std::string& type, RecurrentOp::RecurrentOp(const std::string& type,
const framework::VariableNameMap& inputs, const framework::VariableNameMap& inputs,
...@@ -127,7 +128,7 @@ class RecurrentAlgorithmProtoAndCheckerMaker ...@@ -127,7 +128,7 @@ class RecurrentAlgorithmProtoAndCheckerMaker
AddInput(name.inlinks, AddInput(name.inlinks,
"the inputs that need to be segmented for each step.") "the inputs that need to be segmented for each step.")
.AsDuplicable(); .AsDuplicable();
AddInput(name.boot_memories, "variables to initialize memories.") AddInput(name.initial_states, "variables to initialize states.")
.AsDuplicable(); .AsDuplicable();
AddOutput(name.outlinks, "the outputs that need to be concatenated for all steps.") AddOutput(name.outlinks, "the outputs that need to be concatenated for all steps.")
...@@ -135,9 +136,8 @@ class RecurrentAlgorithmProtoAndCheckerMaker ...@@ -135,9 +136,8 @@ class RecurrentAlgorithmProtoAndCheckerMaker
AddOutput(name.step_scopes, "step scopes"); AddOutput(name.step_scopes, "step scopes");
// Attributes stored in AttributeMap // Attributes stored in AttributeMap
AddAttr<std::vector<std::string>>(name.pre_memories, AddAttr<std::vector<std::string>>(name.ex_states, "names of pre-states");
"names of pre-memories"); AddAttr<std::vector<std::string>>(name.states, "names of states");
AddAttr<std::vector<std::string>>(name.memories, "names of memories");
AddComment("This is a recurrent group operator."); AddComment("This is a recurrent group operator.");
} }
...@@ -152,7 +152,7 @@ void RecurrentGradientAlgorithm::Run( ...@@ -152,7 +152,7 @@ void RecurrentGradientAlgorithm::Run(
rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len);
for (int step_id = seq_len - 1; step_id >= 0; --step_id) { for (int step_id = seq_len - 1; step_id >= 0; --step_id) {
if (static_cast<size_t>(step_id) != seq_len - 1) { if (static_cast<size_t>(step_id) != seq_len - 1) {
rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); rnn::LinkMemories(step_scopes, arg_->states, step_id, 1);
} }
(*stepnet_)->Run(*step_scopes[step_id], dev_ctx); (*stepnet_)->Run(*step_scopes[step_id], dev_ctx);
} }
...@@ -162,7 +162,7 @@ void RecurrentGradientAlgorithm::Run( ...@@ -162,7 +162,7 @@ void RecurrentGradientAlgorithm::Run(
void RecurrentGradientAlgorithm::LinkBootMemoryGradients( void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
Scope* step_scope) const { Scope* step_scope) const {
for (auto& attr : arg_->memories) { for (auto& attr : arg_->states) {
PADDLE_ENFORCE(step_scope->FindVar(attr.var) != nullptr, PADDLE_ENFORCE(step_scope->FindVar(attr.var) != nullptr,
"memory variable [%s] does not exists", attr.var); "memory variable [%s] does not exists", attr.var);
PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr,
...@@ -171,7 +171,7 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( ...@@ -171,7 +171,7 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients(
auto* boot_mem_grad = auto* boot_mem_grad =
step_scope->Var(attr.boot_var)->GetMutable<LoDTensor>(); step_scope->Var(attr.boot_var)->GetMutable<LoDTensor>();
boot_mem_grad->Resize(mem_grad->dims()); boot_mem_grad->Resize(mem_grad->dims());
boot_mem_grad->ShareDataWith<float>(*mem_grad); boot_mem_grad->ShareDataWith(*mem_grad);
} }
} }
......
...@@ -33,7 +33,7 @@ class ReshapeKernel : public framework::OpKernel<T> { ...@@ -33,7 +33,7 @@ class ReshapeKernel : public framework::OpKernel<T> {
std::transform(shape.begin(), shape.end(), shape_int64.begin(), std::transform(shape.begin(), shape.end(), shape_int64.begin(),
[](int a) { return static_cast<int64_t>(a); }); [](int a) { return static_cast<int64_t>(a); });
auto out_dims = framework::make_ddim(shape_int64); auto out_dims = framework::make_ddim(shape_int64);
out->CopyFrom<T>(*in, ctx.GetPlace(), ctx.device_context()); out->CopyFrom(*in, ctx.GetPlace(), ctx.device_context());
out->Resize(out_dims); out->Resize(out_dims);
} }
}; };
...@@ -47,7 +47,7 @@ class ReshapeGradKernel : public framework::OpKernel<T> { ...@@ -47,7 +47,7 @@ class ReshapeGradKernel : public framework::OpKernel<T> {
d_x->mutable_data<T>(ctx.GetPlace()); d_x->mutable_data<T>(ctx.GetPlace());
auto in_dims = d_x->dims(); auto in_dims = d_x->dims();
d_x->CopyFrom<T>(*d_out, ctx.GetPlace(), ctx.device_context()); d_x->CopyFrom(*d_out, ctx.GetPlace(), ctx.device_context());
d_x->Resize(in_dims); d_x->Resize(in_dims);
} }
}; };
......
...@@ -36,14 +36,14 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes, ...@@ -36,14 +36,14 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
LoDTensor* input = input_var->GetMutable<LoDTensor>(); LoDTensor* input = input_var->GetMutable<LoDTensor>();
f::DDim dims = input->dims(); f::DDim dims = input->dims();
PADDLE_ENFORCE_EQ(static_cast<size_t>(dims[0]), seq_len, PADDLE_ENFORCE_EQ(static_cast<size_t>(dims[0]), seq_len,
"all the inlinks be the same length"); "all the inputs be the same length");
f::DDim step_dims = slice_ddim(dims, 1, dims.size()); f::DDim step_dims = slice_ddim(dims, 1, dims.size());
for (size_t j = 0; j < seq_len; j++) { for (size_t j = 0; j < seq_len; j++) {
Tensor* step_input = Tensor* step_input =
step_scopes[j]->Var(inlinks[i])->GetMutable<Tensor>(); step_scopes[j]->Var(inlinks[i])->GetMutable<Tensor>();
// The input of operators of each step is Tensor here. // The input of operators of each step is Tensor here.
// Maybe need to modify Slice function. // Maybe need to modify Slice function.
*step_input = input->Slice<float>(j, j + 1); *step_input = input->Slice(j, j + 1);
step_input->Resize(step_dims); step_input->Resize(step_dims);
} }
} }
...@@ -71,14 +71,14 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes, ...@@ -71,14 +71,14 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>(); step_scopes[j]->FindVar(outlinks[i])->GetMutable<LoDTensor>();
// TODO(luotao02) data type and platform::DeviceContext() should set // TODO(luotao02) data type and platform::DeviceContext() should set
// correctly // correctly
(output->Slice<float>(j, j + 1)) (output->Slice(j, j + 1))
.CopyFrom<float>(*step_output, platform::CPUPlace(), ctx); .CopyFrom(*step_output, platform::CPUPlace(), ctx);
} }
} }
} }
void LinkMemories(const std::vector<Scope*>& scopes, void LinkMemories(const std::vector<Scope*>& scopes,
const std::vector<rnn::MemoryAttr>& memories, const std::vector<rnn::StateAttr>& memories,
const size_t step_id, const int offset) { const size_t step_id, const int offset) {
PADDLE_ENFORCE_LT(step_id, scopes.size(), PADDLE_ENFORCE_LT(step_id, scopes.size(),
"step [%d] is out of range of step scopes' size [%d]", "step [%d] is out of range of step scopes' size [%d]",
...@@ -95,7 +95,7 @@ void LinkMemories(const std::vector<Scope*>& scopes, ...@@ -95,7 +95,7 @@ void LinkMemories(const std::vector<Scope*>& scopes,
auto* mem = scope->FindVar(attr.pre_var)->GetMutable<LoDTensor>(); auto* mem = scope->FindVar(attr.pre_var)->GetMutable<LoDTensor>();
auto* linked_mem = linked_scope->FindVar(attr.var)->GetMutable<LoDTensor>(); auto* linked_mem = linked_scope->FindVar(attr.var)->GetMutable<LoDTensor>();
mem->Resize(linked_mem->dims()); mem->Resize(linked_mem->dims());
mem->ShareDataWith<float>(*linked_mem); mem->ShareDataWith(*linked_mem);
} }
} }
...@@ -106,26 +106,26 @@ void InitArgument(const ArgumentName& name, Argument* arg, ...@@ -106,26 +106,26 @@ void InitArgument(const ArgumentName& name, Argument* arg,
arg->inlinks = op.Inputs(name.inlinks); arg->inlinks = op.Inputs(name.inlinks);
arg->outlinks = op.Outputs(name.outlinks); arg->outlinks = op.Outputs(name.outlinks);
auto& boot_memories = auto& boot_memories = is_grad ? op.Outputs(name.initial_states)
is_grad ? op.Outputs(name.boot_memories) : op.Inputs(name.boot_memories); : op.Inputs(name.initial_states);
// attributes // attributes
auto& memories = op.Attr<std::vector<std::string>>(name.memories); auto& memories = op.Attr<std::vector<std::string>>(name.states);
auto& pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories); auto& pre_memories = op.Attr<std::vector<std::string>>(name.ex_states);
PADDLE_ENFORCE(memories.size() == boot_memories.size(), PADDLE_ENFORCE(memories.size() == boot_memories.size(),
"the size of memories, boot_memories don't match:%d,%d", "the size of states, initial_states don't match:%d,%d",
memories.size(), boot_memories.size()); memories.size(), boot_memories.size());
PADDLE_ENFORCE(pre_memories.size() == boot_memories.size(), PADDLE_ENFORCE(pre_memories.size() == boot_memories.size(),
"the size of pre_memories, boot_memories don't match:%d,%d", "the size of ex_states, initial_states don't match:%d,%d",
pre_memories.size(), boot_memories.size()); pre_memories.size(), boot_memories.size());
PADDLE_ENFORCE(memories.size() > 0, "more than 1 memories should be set"); PADDLE_ENFORCE(memories.size() > 0, "more than 1 states should be set");
for (size_t i = 0; i < memories.size(); ++i) { for (size_t i = 0; i < memories.size(); ++i) {
rnn::MemoryAttr mem_attr; rnn::StateAttr mem_attr;
mem_attr.var = memories[i]; mem_attr.var = memories[i];
mem_attr.pre_var = pre_memories[i]; mem_attr.pre_var = pre_memories[i];
mem_attr.boot_var = boot_memories[i]; mem_attr.boot_var = boot_memories[i];
(arg->memories).push_back(mem_attr); (arg->states).push_back(mem_attr);
} }
} }
......
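A toy NumPy sketch (illustrative only) of the per-step slicing that SegmentInputs and ConcatOutputs perform on the sequence dimension in the hunks above:

import numpy as np

seq_len, step_dim = 3, 4
inp = np.arange(seq_len * step_dim, dtype=np.float32).reshape(seq_len, step_dim)
# SegmentInputs: step j gets input->Slice(j, j + 1), resized to step_dims
step_inputs = [inp[j] for j in range(seq_len)]
# ConcatOutputs: each step output is copied back into rows [j, j + 1) of the output
out = np.stack(step_inputs, axis=0)
assert np.array_equal(out, inp)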
...@@ -31,7 +31,7 @@ using Scope = framework::Scope; ...@@ -31,7 +31,7 @@ using Scope = framework::Scope;
* boot memories in father scope. Other attributes are copied from Op's proto * boot memories in father scope. Other attributes are copied from Op's proto
* attributes. * attributes.
*/ */
struct MemoryAttr { struct StateAttr {
// name of current state variable // name of current state variable
std::string var; std::string var;
// name of previous step's state variable // name of previous step's state variable
...@@ -46,7 +46,7 @@ struct Argument { ...@@ -46,7 +46,7 @@ struct Argument {
std::string step_scopes; std::string step_scopes;
std::vector<std::string> inlinks; std::vector<std::string> inlinks;
std::vector<std::string> outlinks; std::vector<std::string> outlinks;
std::vector<rnn::MemoryAttr> memories; std::vector<rnn::StateAttr> states;
}; };
struct ArgumentName { struct ArgumentName {
...@@ -54,9 +54,9 @@ struct ArgumentName { ...@@ -54,9 +54,9 @@ struct ArgumentName {
std::string step_scopes; std::string step_scopes;
std::string inlinks; std::string inlinks;
std::string outlinks; std::string outlinks;
std::string memories; // the memory name std::string states; // the memory name
std::string pre_memories; // the previous memory name std::string ex_states; // the previous memory name
std::string boot_memories; // the boot memory name std::string initial_states; // the boot memory name
}; };
/** /**
...@@ -74,7 +74,7 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes, ...@@ -74,7 +74,7 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
const size_t seq_len, const platform::DeviceContext& ctx); const size_t seq_len, const platform::DeviceContext& ctx);
void LinkMemories(const std::vector<Scope*>& step_scopes, void LinkMemories(const std::vector<Scope*>& step_scopes,
const std::vector<MemoryAttr>& memories, const size_t step_id, const std::vector<StateAttr>& memories, const size_t step_id,
const int offset); const int offset);
void InitArgument(const ArgumentName& name, Argument* arg, void InitArgument(const ArgumentName& name, Argument* arg,
......
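Purely as a reading aid (plain Python, not an API), the old-to-new name mapping implied by the kArgName and ArgumentName hunks above:

RNN_ARG_RENAMES = {
    "inlinks": "inputs",
    "outlinks": "outputs",
    "memories": "states",
    "pre_memories": "ex_states",
    "boot_memories": "initial_states",
}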
...@@ -30,7 +30,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel<T> { ...@@ -30,7 +30,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel<T> {
auto *Updates = ctx.Input<Tensor>("Updates"); auto *Updates = ctx.Input<Tensor>("Updates");
auto *Out = ctx.Output<Tensor>("Out"); auto *Out = ctx.Output<Tensor>("Out");
Out->ShareDataWith<T>(*Ref); Out->ShareDataWith(*Ref);
GPUScatterAssign<T>(ctx.device_context(), *Updates, *Index, Out); GPUScatterAssign<T>(ctx.device_context(), *Updates, *Index, Out);
} }
...@@ -48,7 +48,7 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel<T> { ...@@ -48,7 +48,7 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel<T> {
auto *dOut = ctx.Input<Tensor>(framework::GradVarName("Out")); auto *dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
// In place gradient: dRef = dO // In place gradient: dRef = dO
dRef->ShareDataWith<T>(*dOut); dRef->ShareDataWith(*dOut);
dUpdates->mutable_data<T>(ctx.GetPlace()); dUpdates->mutable_data<T>(ctx.GetPlace());
// Gradient by Gather: dUpdates = dO[Index] // Gradient by Gather: dUpdates = dO[Index]
GPUGather<T>(ctx.device_context(), *dOut, *Index, dUpdates); GPUGather<T>(ctx.device_context(), *dOut, *Index, dUpdates);
......
...@@ -35,7 +35,7 @@ class ScatterOpKernel : public framework::OpKernel<T> { ...@@ -35,7 +35,7 @@ class ScatterOpKernel : public framework::OpKernel<T> {
auto *Out = ctx.Output<Tensor>("Out"); auto *Out = ctx.Output<Tensor>("Out");
// In place output: Out = Ref, Out[Index] += Updates // In place output: Out = Ref, Out[Index] += Updates
Out->ShareDataWith<T>(*Ref); Out->ShareDataWith(*Ref);
// Apply ScatterUpdate: Out[index] += Updates[:] // Apply ScatterUpdate: Out[index] += Updates[:]
ScatterAssign<T>(ctx.device_context(), *Updates, *Index, Out); ScatterAssign<T>(ctx.device_context(), *Updates, *Index, Out);
} }
...@@ -53,7 +53,7 @@ class ScatterGradientOpKernel : public framework::OpKernel<T> { ...@@ -53,7 +53,7 @@ class ScatterGradientOpKernel : public framework::OpKernel<T> {
auto *dOut = ctx.Input<Tensor>(framework::GradVarName("Out")); auto *dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
// In place gradient: dRef = dO // In place gradient: dRef = dO
dRef->ShareDataWith<T>(*dOut); dRef->ShareDataWith(*dOut);
dUpdates->mutable_data<T>(ctx.GetPlace()); dUpdates->mutable_data<T>(ctx.GetPlace());
// Gradient by Gather: dUpdates += dO[Index] // Gradient by Gather: dUpdates += dO[Index]
CPUGather<T>(ctx.device_context(), *dOut, *Index, dUpdates); CPUGather<T>(ctx.device_context(), *dOut, *Index, dUpdates);
......
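A small NumPy sketch of the scatter update the kernels above implement, following the in-code comments ("Out = Ref, Out[Index] += Updates"); shapes and values are placeholders:

import numpy as np

ref = np.zeros((4, 3), dtype=np.float32)
updates = np.ones((2, 3), dtype=np.float32)
index = np.array([1, 3])

out = ref.copy()            # the kernel shares Ref's data; copied here only for clarity
out[index] += updates       # ScatterAssign: Out[Index] += Updates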
...@@ -87,16 +87,16 @@ class SequenceConcatOpKernel : public framework::OpKernel<T> { ...@@ -87,16 +87,16 @@ class SequenceConcatOpKernel : public framework::OpKernel<T> {
auto out_lod_level = out_lod[level]; auto out_lod_level = out_lod[level];
for (size_t i = 0; i < out_lod_level.size() - 1; ++i) { for (size_t i = 0; i < out_lod_level.size() - 1; ++i) {
Tensor out_t = out->Slice<T>(static_cast<int>(out_lod_level[i]), Tensor out_t = out->Slice(static_cast<int>(out_lod_level[i]),
static_cast<int>(out_lod_level[i + 1])); static_cast<int>(out_lod_level[i + 1]));
auto out_stride = framework::stride(out_t.dims()); auto out_stride = framework::stride(out_t.dims());
size_t offset = 0; size_t offset = 0;
for (size_t j = 0; j < n; ++j) { for (size_t j = 0; j < n; ++j) {
auto in_lod_level = ins[j]->lod()[level]; auto in_lod_level = ins[j]->lod()[level];
auto in_stride = framework::stride(ins[j]->dims()); auto in_stride = framework::stride(ins[j]->dims());
Tensor in_t = ins[j]->Slice<T>(static_cast<int>(in_lod_level[i]), Tensor in_t = ins[j]->Slice(static_cast<int>(in_lod_level[i]),
static_cast<int>(in_lod_level[i + 1])); static_cast<int>(in_lod_level[i + 1]));
size_t axis_dim = in_t.dims()[axis]; size_t axis_dim = in_t.dims()[axis];
StridedMemcpy<T>(ctx.device_context(), in_t.data<T>(), in_stride, StridedMemcpy<T>(ctx.device_context(), in_t.data<T>(), in_stride,
in_t.dims(), out_stride, out_t.data<T>() + offset); in_t.dims(), out_stride, out_t.data<T>() + offset);
...@@ -130,8 +130,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel<T> { ...@@ -130,8 +130,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel<T> {
for (size_t i = 0; i < out_lod_level.size() - 1; ++i) { for (size_t i = 0; i < out_lod_level.size() - 1; ++i) {
Tensor out_grad_t = Tensor out_grad_t =
out_grad->Slice<T>(static_cast<int>(out_lod_level[i]), out_grad->Slice(static_cast<int>(out_lod_level[i]),
static_cast<int>(out_lod_level[i + 1])); static_cast<int>(out_lod_level[i + 1]));
auto out_grad_stride = framework::stride(out_grad_t.dims()); auto out_grad_stride = framework::stride(out_grad_t.dims());
size_t offset = 0; size_t offset = 0;
...@@ -139,8 +139,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel<T> { ...@@ -139,8 +139,8 @@ class SequenceConcatGradOpKernel : public framework::OpKernel<T> {
auto x_grad_lod_level = x_grads[j]->lod()[level]; auto x_grad_lod_level = x_grads[j]->lod()[level];
auto x_grad_stride = framework::stride(x_grads[j]->dims()); auto x_grad_stride = framework::stride(x_grads[j]->dims());
Tensor x_grad_t = Tensor x_grad_t =
x_grads[j]->Slice<T>(static_cast<int>(x_grad_lod_level[i]), x_grads[j]->Slice(static_cast<int>(x_grad_lod_level[i]),
static_cast<int>(x_grad_lod_level[i + 1])); static_cast<int>(x_grad_lod_level[i + 1]));
size_t axis_dim = x_grad_t.dims()[axis]; size_t axis_dim = x_grad_t.dims()[axis];
StridedMemcpy<T>(ctx.device_context(), out_grad_t.data<T>() + offset, StridedMemcpy<T>(ctx.device_context(), out_grad_t.data<T>() + offset,
out_grad_stride, out_grad_t.dims(), x_grad_stride, out_grad_stride, out_grad_t.dims(), x_grad_stride,
......
...@@ -64,9 +64,9 @@ class SequencePoolKernel : public framework::OpKernel<T> { ...@@ -64,9 +64,9 @@ class SequencePoolKernel : public framework::OpKernel<T> {
out->mutable_data<T>(context.GetPlace()); out->mutable_data<T>(context.GetPlace());
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) { for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
Tensor in_t = in->Slice<T>(static_cast<int>(lod_level_0[i]), Tensor in_t = in->Slice(static_cast<int>(lod_level_0[i]),
static_cast<int>(lod_level_0[i + 1])); static_cast<int>(lod_level_0[i + 1]));
Tensor out_t = out->Slice<T>(i, i + 1); Tensor out_t = out->Slice(i, i + 1);
int64_t h = static_cast<int64_t>(lod_level_0[i + 1] - lod_level_0[i]); int64_t h = static_cast<int64_t>(lod_level_0[i + 1] - lod_level_0[i]);
auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w})); auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w}));
auto out_e = EigenVector<T>::Flatten(out_t); auto out_e = EigenVector<T>::Flatten(out_t);
...@@ -116,9 +116,9 @@ class SequencePoolGradKernel : public framework::OpKernel<T> { ...@@ -116,9 +116,9 @@ class SequencePoolGradKernel : public framework::OpKernel<T> {
} }
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
for (int i = 0; i < static_cast<int>(lod.size()) - 1; ++i) { for (int i = 0; i < static_cast<int>(lod.size()) - 1; ++i) {
auto in_g_t = in_g->Slice<T>(static_cast<int>(lod[i]), auto in_g_t =
static_cast<int>(lod[i + 1])); in_g->Slice(static_cast<int>(lod[i]), static_cast<int>(lod[i + 1]));
auto out_g_t = out_g->Slice<T>(i, i + 1); auto out_g_t = out_g->Slice(i, i + 1);
int64_t h = static_cast<int64_t>(lod[i + 1] - lod[i]); int64_t h = static_cast<int64_t>(lod[i + 1] - lod[i]);
auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w}); auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w});
auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w}); auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w});
......
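An illustrative NumPy sketch of the per-sequence slicing in SequencePoolKernel above, using average pooling as the example strategy (the actual pooling type is an op attribute; this is only a sketch):

import numpy as np

lod0 = [0, 2, 5]                                      # two sequences: rows [0, 2) and [2, 5)
x = np.arange(10, dtype=np.float32).reshape(5, 2)     # [total_rows, w]
out = np.stack([x[lod0[i]:lod0[i + 1]].mean(axis=0)   # in->Slice(lod[i], lod[i+1]), then pool
                for i in range(len(lod0) - 1)])
print(out.shape)                                      # (2, 2): one pooled row per sequence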
...@@ -46,8 +46,8 @@ class SequenceSoftmaxKernel : public framework::OpKernel<T> { ...@@ -46,8 +46,8 @@ class SequenceSoftmaxKernel : public framework::OpKernel<T> {
for (int i = 0; i < static_cast<int>(lod[level].size()) - 1; ++i) { for (int i = 0; i < static_cast<int>(lod[level].size()) - 1; ++i) {
int start_pos = static_cast<int>(lod[level][i]); int start_pos = static_cast<int>(lod[level][i]);
int end_pos = static_cast<int>(lod[level][i + 1]); int end_pos = static_cast<int>(lod[level][i + 1]);
Tensor x_i = x->Slice<T>(start_pos, end_pos); Tensor x_i = x->Slice(start_pos, end_pos);
Tensor out_i = out->Slice<T>(start_pos, end_pos); Tensor out_i = out->Slice(start_pos, end_pos);
// Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos)
framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos});
...@@ -75,9 +75,9 @@ class SequenceSoftmaxGradKernel : public framework::OpKernel<T> { ...@@ -75,9 +75,9 @@ class SequenceSoftmaxGradKernel : public framework::OpKernel<T> {
int start_pos = static_cast<int>(lod[level][i]); int start_pos = static_cast<int>(lod[level][i]);
int end_pos = static_cast<int>(lod[level][i + 1]); int end_pos = static_cast<int>(lod[level][i + 1]);
Tensor out_i = out->Slice<T>(start_pos, end_pos); Tensor out_i = out->Slice(start_pos, end_pos);
Tensor out_grad_i = out_grad->Slice<T>(start_pos, end_pos); Tensor out_grad_i = out_grad->Slice(start_pos, end_pos);
Tensor x_grad_i = x_grad->Slice<T>(start_pos, end_pos); Tensor x_grad_i = x_grad->Slice(start_pos, end_pos);
// Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos)
framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos});
......
...@@ -85,7 +85,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { ...@@ -85,7 +85,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>();
Tensor* logit_grad = Tensor* logit_grad =
context.Output<Tensor>(framework::GradVarName("Logits")); context.Output<Tensor>(framework::GradVarName("Logits"));
logit_grad->ShareDataWith<T>(*context.Input<Tensor>("Softmax")); logit_grad->ShareDataWith(*context.Input<Tensor>("Softmax"));
T* logit_grad_data = logit_grad->data<T>(); T* logit_grad_data = logit_grad->data<T>();
const int batch_size = logit_grad->dims()[0]; const int batch_size = logit_grad->dims()[0];
......
...@@ -57,7 +57,7 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel<T> { ...@@ -57,7 +57,7 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel<T> {
const Tensor* labels = context.Input<Tensor>("Label"); const Tensor* labels = context.Input<Tensor>("Label");
Tensor* logit_grad = Tensor* logit_grad =
context.Output<Tensor>(framework::GradVarName("Logits")); context.Output<Tensor>(framework::GradVarName("Logits"));
logit_grad->ShareDataWith<T>(*context.Input<Tensor>("Softmax")); logit_grad->ShareDataWith(*context.Input<Tensor>("Softmax"));
const int class_num = logit_grad->dims()[1]; const int class_num = logit_grad->dims()[1];
if (context.Attr<bool>("soft_label")) { if (context.Attr<bool>("soft_label")) {
......
...@@ -53,10 +53,10 @@ class UniformRandomOp : public framework::OperatorWithKernel { ...@@ -53,10 +53,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE( PADDLE_ENFORCE(
ctx->Attrs().Get<float>("min") < ctx->Attrs().Get<float>("max"), ctx->Attrs().Get<float>("min") < ctx->Attrs().Get<float>("max"),
"uniform_random's min must less then max"); "uniform_random's min must less then max");
auto& dims = ctx->Attrs().Get<std::vector<int>>("dims"); auto& shape = ctx->Attrs().Get<std::vector<int>>("shape");
std::vector<int64_t> temp; std::vector<int64_t> temp;
temp.reserve(dims.size()); temp.reserve(shape.size());
for (auto dim : dims) { for (auto dim : shape) {
temp.push_back(static_cast<int64_t>(dim)); temp.push_back(static_cast<int64_t>(dim));
} }
ctx->SetOutputDim("Out", framework::make_ddim(temp)); ctx->SetOutputDim("Out", framework::make_ddim(temp));
...@@ -78,7 +78,7 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -78,7 +78,7 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(Uniform random operator. AddComment(R"DOC(Uniform random operator.
Used to initialize tensor with uniform random generator. Used to initialize tensor with uniform random generator.
)DOC"); )DOC");
AddAttr<std::vector<int>>("dims", "the dimension of random tensor"); AddAttr<std::vector<int>>("shape", "the dimension of random tensor");
AddAttr<float>("min", "Minimum value of uniform random").SetDefault(-1.0f); AddAttr<float>("min", "Minimum value of uniform random").SetDefault(-1.0f);
AddAttr<float>("max", "Maximun value of uniform random").SetDefault(1.0f); AddAttr<float>("max", "Maximun value of uniform random").SetDefault(1.0f);
AddAttr<int>("seed", AddAttr<int>("seed",
......
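A plain-Python restatement (illustrative, not the C++ API) of the shape inference above after the attribute rename from "dims" to "shape":

def infer_uniform_random_out_dims(attrs):
    assert attrs["min"] < attrs["max"], "uniform_random's min must be less than max"
    return [int(d) for d in attrs["shape"]]   # cast to int64 dims, then SetOutputDim("Out", ...)

print(infer_uniform_random_out_dims({"shape": [2, 3], "min": -1.0, "max": 1.0}))   # [2, 3]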
...@@ -84,10 +84,12 @@ PYBIND11_PLUGIN(core) { ...@@ -84,10 +84,12 @@ PYBIND11_PLUGIN(core) {
.def("set", PyCPUTensorSetFromArray<float>) .def("set", PyCPUTensorSetFromArray<float>)
.def("set", PyCPUTensorSetFromArray<int>) .def("set", PyCPUTensorSetFromArray<int>)
.def("set", PyCPUTensorSetFromArray<double>) .def("set", PyCPUTensorSetFromArray<double>)
.def("set", PyCPUTensorSetFromArray<int64_t>)
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
.def("set", PyCUDATensorSetFromArray<float>) .def("set", PyCUDATensorSetFromArray<float>)
.def("set", PyCUDATensorSetFromArray<int>) .def("set", PyCUDATensorSetFromArray<int>)
.def("set", PyCUDATensorSetFromArray<double>) .def("set", PyCUDATensorSetFromArray<double>)
.def("set", PyCUDATensorSetFromArray<int64_t>)
#endif #endif
.def("shape", [](Tensor &self) { return vectorize(self.dims()); }) .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
.def("set_float_element", TensorSetElement<float>) .def("set_float_element", TensorSetElement<float>)
...@@ -217,8 +219,7 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -217,8 +219,7 @@ All parameter, weight, gradient are variables in Paddle.
.def(py::init<>()) .def(py::init<>())
.def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); }, .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
py::return_value_policy::reference) py::return_value_policy::reference)
.def("drop_kids", &Scope::DropKids) .def("drop_kids", &Scope::DropKids);
.def_static("global_scope", &GetGlobalScope);
//! @note: Be careful! PyBind will return std::string as an unicode, not //! @note: Be careful! PyBind will return std::string as an unicode, not
//! Python str. If you want a str object, you should cast them in Python. //! Python str. If you want a str object, you should cast them in Python.
...@@ -412,18 +413,18 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -412,18 +413,18 @@ All parameter, weight, gradient are variables in Paddle.
return static_cast<operators::DynamicRecurrentOp *>( return static_cast<operators::DynamicRecurrentOp *>(
rnn_op.release()); rnn_op.release());
}) })
.def("set_stepnet", .def("set_step_unit",
[](operators::DynamicRecurrentOp &self, const operators::NetOp &net) [](operators::DynamicRecurrentOp &self, const operators::NetOp &net)
-> void { self.SetStepNet(net.Clone()); }) -> void { self.rnn.SetStepUnit(net.Clone()); })
.def("get_state", .def("get_state",
[](operators::DynamicRecurrentOp &self, const std::string &name) [](operators::DynamicRecurrentOp &self, const std::string &name)
-> const TensorArray & { return self.state(name); }) -> const TensorArray & { return self.rnn.state(name); })
.def("get_step_input", .def("get_step_input",
[](operators::DynamicRecurrentOp &self, const std::string &name) [](operators::DynamicRecurrentOp &self, const std::string &name)
-> const TensorArray & { return self.step_input(name); }) -> const TensorArray & { return self.rnn.step_input(name); })
.def("get_step_output", .def("get_step_output",
[](operators::DynamicRecurrentOp &self, const std::string &name) [](operators::DynamicRecurrentOp &self, const std::string &name)
-> const TensorArray & { return self.step_output(name); }); -> const TensorArray & { return self.rnn.step_output(name); });
// cond_op // cond_op
py::class_<operators::CondOp, OperatorBase>(m, "CondOp") py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
...@@ -449,19 +450,15 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -449,19 +450,15 @@ All parameter, weight, gradient are variables in Paddle.
py::class_<framework::Executor>(m, "Executor") py::class_<framework::Executor>(m, "Executor")
.def(py::init<std::vector<platform::Place> &>()) .def(py::init<std::vector<platform::Place> &>())
.def("run", .def("run", [](Executor &self, ProgramDescBind *program_bind,
[](Executor &self, ProgramDescBind *program_bind, int block_id) { Scope *scope, int block_id) {
framework::Scope &global_scope = GetGlobalScope(); self.Run(*program_bind->Proto(), scope, block_id);
self.Run(*program_bind->Proto(), &global_scope, block_id); });
});
m.def("unique_integer", UniqueIntegerGenerator); m.def("unique_integer", UniqueIntegerGenerator);
m.def("is_compile_gpu", IsCompileGPU); m.def("is_compile_gpu", IsCompileGPU);
//! FIXME: it is no need to `set_xxx_float/double/int` m.def("set_feed_variable", framework::SetFeedVariable);
m.def("set_feed_variable_float", framework::SetFeedVariable<float>);
m.def("set_feed_variable_double", framework::SetFeedVariable<double>);
m.def("set_feed_variable_int", framework::SetFeedVariable<int>);
m.def("get_fetch_variable", framework::GetFetchVariable); m.def("get_fetch_variable", framework::GetFetchVariable);
BindProgramDesc(m); BindProgramDesc(m);
......
import paddle.v2.framework.core as core import paddle.v2.framework.core as core
from paddle.v2.framework.framework import Block, Program from paddle.v2.framework.framework import Block, Program
g_scope = core.Scope()
class Executor(object): class Executor(object):
def __init__(self, places): def __init__(self, places):
...@@ -20,10 +22,14 @@ class Executor(object): ...@@ -20,10 +22,14 @@ class Executor(object):
feed, feed,
fetch_list, fetch_list,
feed_var_name='feed', feed_var_name='feed',
fetch_var_name='fetch'): fetch_var_name='fetch',
scope=None):
if not isinstance(program, Program): if not isinstance(program, Program):
raise TypeError() raise TypeError()
if scope is None:
scope = g_scope
program = program.clone() program = program.clone()
global_block = program.global_block() global_block = program.global_block()
feed_var = global_block.create_var( feed_var = global_block.create_var(
...@@ -38,8 +44,7 @@ class Executor(object): ...@@ -38,8 +44,7 @@ class Executor(object):
inputs={'X': [feed_var]}, inputs={'X': [feed_var]},
outputs={'Out': [out]}, outputs={'Out': [out]},
attrs={'col': i}) attrs={'col': i})
# FIXME core.set_feed_variable(scope, feed[name], feed_var.name, i)
core.set_feed_variable_float(feed[name], feed_var.name, i)
fetch_var = global_block.create_var( fetch_var = global_block.create_var(
name=fetch_var_name, name=fetch_var_name,
...@@ -52,8 +57,8 @@ class Executor(object): ...@@ -52,8 +57,8 @@ class Executor(object):
outputs={'Out': [fetch_var]}, outputs={'Out': [fetch_var]},
attrs={'col': i}) attrs={'col': i})
self.executor.run(program.desc, 0) self.executor.run(program.desc, scope, 0)
return [ return [
core.get_fetch_variable(fetch_var_name, i) core.get_fetch_variable(scope, fetch_var_name, i)
for i in xrange(len(fetch_list)) for i in xrange(len(fetch_list))
] ]
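A hedged usage sketch of the scope-aware run() above; program, feed tensors, and fetch variables are assumed to have been built elsewhere with the layers API, and the call simply mirrors the new signature (scope=None falls back to the module-level g_scope):

import paddle.v2.framework.core as core
from paddle.v2.framework.executor import Executor

place = core.CPUPlace()
exe = Executor([place])                 # the pybind Executor takes a list of places
my_scope = core.Scope()                 # optional; omit to reuse the default g_scope

# program, feed_tensor and out_var are placeholders built elsewhere.
outs = exe.run(program,
               feed={'x': feed_tensor},
               fetch_list=[out_var],
               scope=my_scope)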
...@@ -15,7 +15,7 @@ class Variable(object): ...@@ -15,7 +15,7 @@ class Variable(object):
shape=None, shape=None,
dtype=None, dtype=None,
lod_level=None, lod_level=None,
persistable=False, persistable=None,
**kwargs): **kwargs):
self.block = block self.block = block
...@@ -343,6 +343,8 @@ class Block(object): ...@@ -343,6 +343,8 @@ class Block(object):
def create_parameter(self, *args, **kwargs): def create_parameter(self, *args, **kwargs):
global_block = self.program.global_block() global_block = self.program.global_block()
param = Parameter(global_block, *args, **kwargs) param = Parameter(global_block, *args, **kwargs)
if 'init_attr' in kwargs:
self._prepend_initialize_ops_(param, kwargs['init_attr'])
return param return param
def append_op(self, *args, **kwargs): def append_op(self, *args, **kwargs):
...@@ -401,6 +403,17 @@ class Block(object): ...@@ -401,6 +403,17 @@ class Block(object):
for index in range(len(self.ops)): for index in range(len(self.ops)):
assert self.ops[index].desc == ops_in_cpp[index] assert self.ops[index].desc == ops_in_cpp[index]
def _prepend_initialize_ops_(self, param, init_attr):
op_type = init_attr['type']
init_attr['shape'] = param.shape
init_attr['data_type'] = int(param.data_type)
op = self.prepend_op(
type=op_type,
inputs=None,
outputs={'Out': [param]},
attrs=init_attr)
param.op = op
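A hypothetical call (names and attribute values are placeholders) showing what _prepend_initialize_ops_ above produces when a parameter is created with an init_attr:

# Assuming `block` belongs to a Program built elsewhere:
w = block.create_parameter(
    name='fc.w',
    shape=[784, 100],
    dtype='float32',
    init_attr={'type': 'uniform_random', 'min': -1.0, 'max': 1.0})
# The global block now starts with, roughly:
#   uniform_random(shape=[784, 100], data_type=<float32>, min=-1.0, max=1.0) -> fc.w
# and w.op points at that prepended initializer op.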
class Program(object): class Program(object):
def __init__(self): def __init__(self):
...@@ -475,27 +488,10 @@ class Parameter(Variable): ...@@ -475,27 +488,10 @@ class Parameter(Variable):
Variable.__init__( Variable.__init__(
self, block, persistable=True, shape=shape, dtype=dtype, **kwargs) self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
self.trainable = kwargs.get('trainable', True) self.trainable = kwargs.get('trainable', True)
self.init_attr = kwargs.get('initialize_attr', {
'type': 'uniform_random',
'min': -1.0,
'max': 1.0
})
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0}) self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self._append_initialize_ops_()
def _append_initialize_ops_(self):
attr = self.init_attr
op_type = attr.pop('type', None)
block = self.block
assert isinstance(block, Block)
shape = self.shape
attr['dims'] = shape
attr['data_type'] = int(self.data_type)
op = block.prepend_op(
type=op_type, inputs=None, outputs={'Out': [self]}, attrs=attr)
self.op = op
# program is a global instance. # program is a global instance.
g_program = Program() g_program = Program()
g_init_program = Program()
from paddle.v2.framework.framework import Variable, OpProtoHolder, g_program from paddle.v2.framework.framework import Variable, OpProtoHolder, g_program, g_init_program
import paddle.v2.framework.core as core import paddle.v2.framework.core as core
import copy import copy
import itertools import itertools
...@@ -29,6 +29,14 @@ class LayerHelper(object): ...@@ -29,6 +29,14 @@ class LayerHelper(object):
else: else:
return prog return prog
@property
def init_program(self):
prog = self.kwargs.get('init_program', None)
if prog is None:
return g_init_program
else:
return prog
def append_op(self, *args, **kwargs): def append_op(self, *args, **kwargs):
return self.program.current_block().append_op(*args, **kwargs) return self.program.current_block().append_op(*args, **kwargs)
...@@ -66,16 +74,14 @@ class LayerHelper(object): ...@@ -66,16 +74,14 @@ class LayerHelper(object):
actual = self.kwargs.get('param_attr', None) actual = self.kwargs.get('param_attr', None)
return actual if actual is not None else default return actual if actual is not None else default
def bias_attr(self, shape, dtype): def bias_attr(self):
bias_attr = self.kwargs.get('bias_attr', None) bias_attr = self.kwargs.get('bias_attr', None)
if bias_attr is True: if bias_attr is True:
bias_attr = { bias_attr = {
'name': None, 'name': None,
'init_attr': { 'init_attr': {
'type': 'fill_constant', 'type': 'fill_constant',
'value': 0.0, 'value': 0.0
'shape': shape,
'dataType': dtype
} }
} }
return bias_attr return bias_attr
...@@ -113,22 +119,27 @@ class LayerHelper(object): ...@@ -113,22 +119,27 @@ class LayerHelper(object):
def create_parameter(self, attr, shape, dtype, suffix='w'): def create_parameter(self, attr, shape, dtype, suffix='w'):
if attr['name'] is None: if attr['name'] is None:
attr['name'] = unique_name(".".join([self.name, suffix])) attr['name'] = unique_name(".".join([self.name, suffix]))
return self.program.global_block().create_parameter( self.init_program.global_block().create_parameter(
name=attr['name'], name=attr['name'],
dtype=dtype, dtype=dtype,
shape=shape, shape=shape,
initialize_attr=attr['init_attr']) init_attr=attr['init_attr'])
return self.program.global_block().create_parameter(
name=attr['name'], dtype=dtype, shape=shape)
def create_tmp_variable(self, dtype): def create_tmp_variable(self, dtype):
return self.program.current_block().create_var( return self.program.current_block().create_var(
name=unique_name(".".join([self.name, 'tmp'])), dtype=dtype) name=unique_name(".".join([self.name, 'tmp'])),
dtype=dtype,
persistable=False)
def create_global_variable(self, *args, **kwargs): def create_global_variable(self, *args, **kwargs):
return self.program.global_block().create_var(*args, **kwargs) return self.program.global_block().create_var(
*args, persistable=False, **kwargs)
def append_bias_op(self, input_var): def append_bias_op(self, input_var):
size = list(input_var.shape[1:]) size = list(input_var.shape[1:])
bias_attr = self.bias_attr(size, dtype=input_var.data_type) bias_attr = self.bias_attr()
if not bias_attr: if not bias_attr:
return input_var return input_var
......
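A hedged sketch of the program/init_program split that LayerHelper.create_parameter above introduces: the parameter and its init_attr go into the init program's global block (so the initializer ops live there), while the main program only declares the parameter. Layer names and sizes below are placeholders:

from paddle.v2.framework.framework import Program
import paddle.v2.framework.layers as layers

init_program = Program()
program = Program()

x = layers.data(name='x', shape=[784], data_type='float32',
                program=program, init_program=init_program)
y = layers.fc(input=x, size=100,
              program=program, init_program=init_program)
# init_program's global block holds the parameter initializer ops (e.g. uniform_random);
# program's global block only declares the parameters and holds the fc ops.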
...@@ -13,7 +13,8 @@ def fc(input, ...@@ -13,7 +13,8 @@ def fc(input,
name=None, name=None,
act=None, act=None,
num_flatten_dims=1, num_flatten_dims=1,
program=None): program=None,
init_program=None):
# create helper # create helper
helper = LayerHelper('fc', **locals()) helper = LayerHelper('fc', **locals())
...@@ -59,7 +60,8 @@ def data(name, ...@@ -59,7 +60,8 @@ def data(name,
data_type='float32', data_type='float32',
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
append_batch_size=True, append_batch_size=True,
program=None): program=None,
init_program=None):
helper = LayerHelper('data', **locals()) helper = LayerHelper('data', **locals())
if append_batch_size: if append_batch_size:
shape = [-1] + shape # append batch size as -1 shape = [-1] + shape # append batch size as -1
...@@ -160,7 +162,8 @@ def conv2d(input, ...@@ -160,7 +162,8 @@ def conv2d(input,
padding=None, padding=None,
bias_attr=None, bias_attr=None,
param_attr=None, param_attr=None,
program=None): program=None,
init_program=None):
helper = LayerHelper('conv2d', **locals()) helper = LayerHelper('conv2d', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
...@@ -207,7 +210,8 @@ def pool2d(input, ...@@ -207,7 +210,8 @@ def pool2d(input,
pool_stride=[1, 1], pool_stride=[1, 1],
pool_padding=[0, 0], pool_padding=[0, 0],
global_pooling=False, global_pooling=False,
program=None): program=None,
init_program=None):
if pool_type not in ["max", "avg"]: if pool_type not in ["max", "avg"]:
raise ValueError( raise ValueError(
"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
......
...@@ -7,18 +7,21 @@ def simple_img_conv_pool(input, ...@@ -7,18 +7,21 @@ def simple_img_conv_pool(input,
pool_size, pool_size,
pool_stride, pool_stride,
act, act,
program=None): program=None,
init_program=None):
conv_out = layers.conv2d( conv_out = layers.conv2d(
input=input, input=input,
num_filters=num_filters, num_filters=num_filters,
filter_size=filter_size, filter_size=filter_size,
act=act, act=act,
program=program) program=program,
init_program=init_program)
pool_out = layers.pool2d( pool_out = layers.pool2d(
input=conv_out, input=conv_out,
pool_size=pool_size, pool_size=pool_size,
pool_type='max', pool_type='max',
pool_stride=pool_stride, pool_stride=pool_stride,
program=program) program=program,
init_program=init_program)
return pool_out return pool_out
import paddle.v2.framework.framework as framework import paddle.v2.framework.framework as framework
from collections import defaultdict
__all__ = ['SGDOptimizer'] __all__ = ['SGDOptimizer', 'MomentumOptimizer']
class Optimizer(object): class Optimizer(object):
"""Optimizer Base class. """Optimizer Base class.
Define the common interface of an optimizer. Define the common interface of an optimizer.
User should not use this class directly, but needs to use one of its implementations. User should not use this class directly,
but needs to use one of its implementations.
""" """
def __init__(self): def __init__(self):
pass # Dictionary of accumulators. Some optimizer subclasses need to
# allocate and manage extra variables associated with the parameters
# to train. These variables are called accumulators.
# {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}
self._accumulators = defaultdict(lambda: dict())
def _append_optimize_op(self, block, param_and_grad): def _append_optimize_op(self, block, param_and_grad):
""" append optimize operator to block and return all the added optimize_op """ append optimize operator to block and return all the added optimize_op
""" """
raise NotImplementedError() raise NotImplementedError()
def create_backward_pass(self, loss, parameter_list=None, no_grad_set=None): def _initialize_tensors(self, block):
"""Create all necessary tensors, that will be shared for all parameter updates.
Tensors like learning rate should be initialized here.
Args:
block: the block in which the loss variable is present
"""
pass
def _create_accumulators(self, block, parameters):
"""Create all accumulators needed by the parameters
Args:
block: the block in which the loss variable is present
parameters: list of parameter variables for the optimizer
""" """
create and add gradient Operators in BlockDesc to Compute gradients of `loss` pass
for parameters in parameter_list
def _add_accumulator(self, block, name, param, dtype=None, fill_value=0.0):
"""Utility function to add an accumulator for a parameter
Args:
block: the block in which the loss variable is present
name: name of the accumulator
param: parameter variable for which accumulator is to be added
dtype: data type of the accumulator variable
fill_value: value to initialize the accumulator variable
"""
if (name in self._accumulators and
param.name in self._accumulators[name]):
raise Exception("Accumulator {} already exists for parmeter {}".
format(name, param.name))
global_block = block.program.global_block()
param_shape = list(param.shape)
param_acc = global_block.create_var(
dtype=dtype, shape=param_shape, lod_level=0)
# Initialize the accumulator with fill_value
# FIXME: Fix when Initialization design has been implemented
# https://github.com/PaddlePaddle/Paddle/pull/4852
global_block.append_op(
type="fill_constant",
outputs={"Out": param_acc},
attrs={"shape": param_shape,
"value": fill_value})
# Add to accumulators dict
self._accumulators[name][param.name] = param_acc
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter variable for which accumulator is to be fetched
Returns:
accumulator variable for the parameter
"""
if (name not in self._accumulators or
param.name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, param.name))
return self._accumulators[name][param.name]
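A minimal sketch (class and accumulator names are hypothetical) of how a subclass is expected to combine the helpers above; the MomentumOptimizer added further down follows exactly this pattern with a "velocity" accumulator:

class MyOptimizer(Optimizer):
    _acc_str = "my_state"

    def _create_accumulators(self, block, parameters):
        # one extra per-parameter variable, created in the global block
        for p in parameters:
            self._add_accumulator(block, self._acc_str, p, 'float32')

    def _append_optimize_op(self, block, param_and_grad):
        acc = self._get_accumulator(self._acc_str, param_and_grad[0])
        # ...the subclass would build and return the update op that reads
        # param/grad and reads/writes `acc` here...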
def create_backward_pass(self, loss, parameter_list=None, no_grad_set=None):
"""Create and add gradient Operators in BlockDesc to compute
gradients of `loss` for parameters in parameter_list
Args: Args:
loss: an variable generated by cost function. loss: an variable generated by cost function.
no_grad_set: variable that should not create gradient no_grad_set: variable that should not create gradient
parameter_list: parameters that need to compute gradient and update to optimize the loss. parameter_list: parameters that need to compute gradient and
update to optimize the loss.
Returns: Returns:
list of (parameters, gradients) pair. list of (parameters, gradients) pair.
...@@ -48,7 +120,8 @@ class Optimizer(object): ...@@ -48,7 +120,8 @@ class Optimizer(object):
if not grad_block.has_var(grad_info[0]): if not grad_block.has_var(grad_info[0]):
raise Exception("grad block[%d] did not have grad var %s" % raise Exception("grad block[%d] did not have grad var %s" %
grad_info[1], grad_info[0]) grad_info[1], grad_info[0])
param_var = loss.block.var(param) # Get the param var from the global block
param_var = loss.block.program.global_block().var(param)
grad_var = grad_block.var(grad_info[0]) grad_var = grad_block.var(grad_info[0])
if loss.block.has_var(grad_info[0]): if loss.block.has_var(grad_info[0]):
params_and_grads.append((param_var, grad_var)) params_and_grads.append((param_var, grad_var))
...@@ -64,14 +137,29 @@ class Optimizer(object): ...@@ -64,14 +137,29 @@ class Optimizer(object):
parameters_and_grads: a list of (variable, gradient) pair to update. parameters_and_grads: a list of (variable, gradient) pair to update.
Returns: Returns:
optimization_op_list: a list of optimization operators that will update parameters using gradients. optimization_op_list: a list of optimization operators that will update
parameters using gradients.
""" """
# This is a default implementation of create_optimization_pass that
# can be shared by most optimizers. This implementation assumes that
# the subclass will implement the _append_optimize_op method and the
# _initialize_tensors method. The subclass can extend the
# _create_accumulators method if it needs to create accumulators
# for parameters.
# Create any accumulators
self._create_accumulators(loss.block,
[p[0] for p in parameters_and_grads])
# Create any necessary tensors
self._initialize_tensors(loss.block)
optimize_ops = [] optimize_ops = []
for param_and_grad in parameters_and_grads: for param_and_grad in parameters_and_grads:
if param_and_grad[1] is not None: if param_and_grad[1] is not None:
optimize_op = self._append_optimize_op(loss.block, optimize_op = self._append_optimize_op(loss.block,
param_and_grad) param_and_grad)
optimize_ops.append(optimize_op) optimize_ops.append(optimize_op)
return optimize_ops return optimize_ops
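Per the comments above, the default create_optimization_pass only relies on a few subclass hooks; a minimal sketch of such a subclass (hypothetical, not part of the library):

class NoOpOptimizerSketch(Optimizer):
    # Sketch only: shows the hooks the default pass drives, not a real optimizer.
    def _create_accumulators(self, block, parameters):
        pass  # plain SGD-style optimizers need no per-parameter state

    def _initialize_tensors(self, block):
        pass  # e.g. create a learning-rate variable and fill it here

    def _append_optimize_op(self, block, param_and_grad):
        # must append the update operator for param_and_grad[0] to `block` and return it
        raise NotImplementedError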
def minimize(self, loss, parameter_list=None, no_grad_set=None): def minimize(self, loss, parameter_list=None, no_grad_set=None):
...@@ -92,33 +180,95 @@ class SGDOptimizer(Optimizer): ...@@ -92,33 +180,95 @@ class SGDOptimizer(Optimizer):
def __init__(self, learning_rate): def __init__(self, learning_rate):
assert learning_rate is not None assert learning_rate is not None
super(Optimizer, self).__init__() super(SGDOptimizer, self).__init__()
self.type = "sgd" self.type = "sgd"
self._learning_rate = learning_rate self._learning_rate = learning_rate
def _append_optimize_op(self, block, param_and_grad): def _initialize_tensors(self, block):
assert isinstance(block, framework.Block) assert isinstance(block, framework.Block)
lr_shape = [1] lr_shape = [1]
# create a var for learning_rate # create a variable for learning_rate
lr = block.create_var(dtype="float32", shape=lr_shape, lod_level=0) self._lr = block.create_var(
dtype="float32", shape=lr_shape, lod_level=0)
# create an op to init the learning_rate # create an op to init the learning_rate
init_op = block.append_op( # FIXME: Fix when Initialization design has been implemented
# https://github.com/PaddlePaddle/Paddle/pull/4852
block.append_op(
type="fill_constant", type="fill_constant",
outputs={"Out": lr}, outputs={"Out": self._lr},
attrs={"shape": lr_shape, attrs={"shape": lr_shape,
"value": self._learning_rate}) "value": self._learning_rate})
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
# create the optimize op # create the optimize op
sgd_op = block.append_op( sgd_op = block.append_op(
type=self.type, type=self.type,
inputs={ inputs={
"Param": param_and_grad[0], "Param": param_and_grad[0],
"Grad": param_and_grad[1], "Grad": param_and_grad[1],
"LearningRate": lr "LearningRate": self._lr
}, },
outputs={"ParamOut": param_and_grad[0]}, outputs={"ParamOut": param_and_grad[0]})
attrs={"shape": [1],
"value": self._learning_rate})
return sgd_op return sgd_op
class MomentumOptimizer(Optimizer):
"""Simple Momentum optimizer with velocity state
"""
_velocity_acc_str = "velocity"
def __init__(self, learning_rate, momentum):
assert learning_rate is not None
assert momentum is not None
super(MomentumOptimizer, self).__init__()
self.type = "momentum"
self._learning_rate = learning_rate
self._momentum = momentum
def _initialize_tensors(self, block):
assert isinstance(block, framework.Block)
lr_shape = [1]
# create a variable for learning_rate
self._lr = block.create_var(
dtype="float32", shape=lr_shape, lod_level=0)
# create an op to init the learning_rate
# FIXME: Fix when Initialization design has been implemented
# https://github.com/PaddlePaddle/Paddle/pull/4852
block.append_op(
type="fill_constant",
outputs={"Out": self._lr},
attrs={"shape": lr_shape,
"value": self._learning_rate})
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(block, self._velocity_acc_str, p, 'float32')
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
velocity_acc = self._get_accumulator(self._velocity_acc_str,
param_and_grad[0])
# create the momentum optimize op
momentum_op = block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"Velocity": velocity_acc,
"LearningRate": self._lr
},
outputs={
"ParamOut": param_and_grad[0],
"VelocityOut": velocity_acc
},
attrs={"mu": self._momentum})
return momentum_op
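For orientation, the update this op is expected to apply, matching the numpy reference in the momentum op test later in this diff (a sketch, not the kernel itself):

import numpy as np

def momentum_update(param, grad, velocity, lr, mu):
    # the velocity accumulates the gradient; the parameter then steps along the velocity
    velocity_out = mu * velocity + grad
    param_out = param - lr * velocity_out
    return param_out, velocity_out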
...@@ -21,7 +21,7 @@ class TestCrossEntropyOp1(OpTest): ...@@ -21,7 +21,7 @@ class TestCrossEntropyOp1(OpTest):
self.inputs = {"X": X, "Label": label} self.inputs = {"X": X, "Label": label}
self.outputs = {"Y": cross_entropy} self.outputs = {"Y": cross_entropy}
self.attrs = {"softLabel": False} self.attrs = {"soft_label": False}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
......
...@@ -4,6 +4,12 @@ import unittest ...@@ -4,6 +4,12 @@ import unittest
from paddle.v2.framework.op import Operator, DynamicRecurrentOp from paddle.v2.framework.op import Operator, DynamicRecurrentOp
import numpy as np import numpy as np
# for simplicity, just one level of LoD
lod_py = [[0, 4, 7, 9, 10]]
input_dim = 30
num_sents = len(lod_py[0]) - 1
weight_dim = 15
def create_tensor(scope, name, shape, np_data): def create_tensor(scope, name, shape, np_data):
tensor = scope.var(name).get_tensor() tensor = scope.var(name).get_tensor()
...@@ -12,6 +18,17 @@ def create_tensor(scope, name, shape, np_data): ...@@ -12,6 +18,17 @@ def create_tensor(scope, name, shape, np_data):
return tensor return tensor
class PyRNNStep(object):
def __init__(self):
self.x = np.random.normal(size=(lod_py[0][-1],
input_dim)).astype("float32")
self.W = np.random.normal(size=(input_dim, input_dim)).astype("float32")
self.U = np.random.normal(size=(input_dim, input_dim)).astype("float32")
self.h_boot = np.random.normal(size=(num_sents,
input_dim)).astype("float32")
class DynamicRecurrentOpTest(unittest.TestCase): class DynamicRecurrentOpTest(unittest.TestCase):
''' '''
Test RNNOp Test RNNOp
...@@ -23,17 +40,13 @@ class DynamicRecurrentOpTest(unittest.TestCase): ...@@ -23,17 +40,13 @@ class DynamicRecurrentOpTest(unittest.TestCase):
- U - U
vars: vars:
- x - x
memories: states:
- h - h
outputs: outputs:
- h - h
''' '''
# for siplicity, just one level LoD py = PyRNNStep()
lod_py = [[0, 4, 7, 9, 10]]
input_dim = 30
num_sents = len(lod_py[0]) - 1
weight_dim = 15
def forward(self): def forward(self):
self.scope = core.Scope() self.scope = core.Scope()
...@@ -42,64 +55,55 @@ class DynamicRecurrentOpTest(unittest.TestCase): ...@@ -42,64 +55,55 @@ class DynamicRecurrentOpTest(unittest.TestCase):
self.create_step_net() self.create_step_net()
ctx = core.DeviceContext.create(core.CPUPlace()) ctx = core.DeviceContext.create(core.CPUPlace())
self.rnnop.run(self.scope, ctx) self.rnnop.run(self.scope, ctx)
state = self.rnnop.get_state("h@mem") state = self.rnnop.get_state("h@state")
print 'state size: ', state.size() print 'state size: ', state.size()
step_inputs = self.rnnop.get_step_input("x") step_inputs = self.rnnop.get_step_input("x")
print "x size ", step_inputs.size() print "x size ", step_inputs.size()
for i in range(step_inputs.size()): for i in range(step_inputs.size()):
print "x %d" % i, np.array(step_inputs.read(i).get_dims()) print "x %d" % i, np.array(step_inputs.read(i).get_dims())
step_outputs = self.rnnop.get_step_output('h@mem') step_outputs = self.rnnop.get_step_output('h@state')
print 'step_outputs.size ', step_outputs.size() print 'step_outputs.size ', step_outputs.size()
output = self.scope.find_var("h@mem").get_tensor() output = self.scope.find_var("h@state").get_tensor()
print 'output', np.array(output).shape print 'output', np.array(output).shape
def create_global_variables(self): def create_global_variables(self):
x = np.random.normal(size=(self.lod_py[0][-1],
self.input_dim)).astype("float32")
W = np.random.normal(size=(self.input_dim,
self.input_dim)).astype("float32")
U = np.random.normal(size=(self.input_dim,
self.input_dim)).astype("float32")
h_boot = np.random.normal(size=(self.num_sents,
self.input_dim)).astype("float32")
# create inlink # create inlink
x_tensor = create_tensor(self.scope, "x", x_tensor = create_tensor(self.scope, "x", [num_sents, input_dim],
[self.num_sents, self.input_dim], x) self.py.x)
x_tensor.set_lod(self.lod_py) x_tensor.set_lod(lod_py)
create_tensor(self.scope, "W", [self.input_dim, self.input_dim], W) create_tensor(self.scope, "W", [input_dim, input_dim], self.py.W)
create_tensor(self.scope, "U", [self.input_dim, self.input_dim], U) create_tensor(self.scope, "U", [input_dim, input_dim], self.py.U)
create_tensor(self.scope, "h_boot", [self.num_sents, self.input_dim], create_tensor(self.scope, "h_boot", [num_sents, input_dim],
h_boot) self.py.h_boot)
self.scope.var("step_scopes") self.scope.var("step_scopes")
self.scope.var("h@mem") self.scope.var("h@state")
def create_rnn_op(self): def create_rnn_op(self):
# create RNNOp # create RNNOp
self.rnnop = DynamicRecurrentOp( self.rnnop = DynamicRecurrentOp(
# inputs # inputs
inlinks=["x"], inputs=["x"],
boot_memories=["h_boot"], initial_states=["h_boot"],
step_net="stepnet", step_net="step_unit",
# outputs # outputs
outlinks=["h@mem"], outputs=["h@state"],
step_scopes="step_scopes", step_scopes="step_scopes",
# attributes # attributes
pre_memories=["h@pre"], ex_states=["h@pre"],
memories=["h@mem"]) states=["h@state"])
def create_step_net(self): def create_step_net(self):
stepnet = core.Net.create() step_unit = core.Net.create()
x_fc_op = Operator("mul", X="x", Y="W", Out="Wx") x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh") h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum") sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
sig_op = Operator("sigmoid", X="sum", Y="h@mem") sig_op = Operator("sigmoid", X="sum", Y="h@state")
for op in [x_fc_op, h_fc_op, sum_op, sig_op]: for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
stepnet.append_op(op) step_unit.append_op(op)
stepnet.complete_add_op(True) step_unit.complete_add_op(True)
self.rnnop.set_stepnet(stepnet) self.rnnop.set_step_unit(step_unit)
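Written out in plain numpy, the step unit above computes, per time step (assuming `mul` here is a matrix product):

import numpy as np

def step_reference(x_t, h_pre, W, U):
    # Wx = x_t.W, Uh = h_pre.U, h@state = sigmoid(Wx + Uh) -- mirrors the four operators above
    return 1.0 / (1.0 + np.exp(-(np.dot(x_t, W) + np.dot(h_pre, U))))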
def test_forward(self): def test_forward(self):
print 'test recurrent op forward' print 'test recurrent op forward'
...@@ -107,5 +111,58 @@ class DynamicRecurrentOpTest(unittest.TestCase): ...@@ -107,5 +111,58 @@ class DynamicRecurrentOpTest(unittest.TestCase):
print 'pd_output', pd_output print 'pd_output', pd_output
class RecurrentGradientOpTest(unittest.TestCase):
py = PyRNNStep()
def create_forward_op(self):
# create RNNOp
self.forward_op = DynamicRecurrentOp(
# inputs
inputs=["x"],
initial_states=["h_boot"],
step_net="step_unit",
# outputs
outputs=["h@state"],
step_scopes="step_scopes",
# attributes
ex_states=["h@pre"],
states=["h@state"])
def create_gradient_op(self):
a = set()
backward_op = core.DynamicRecurrentOp.backward(self.forward_op, a)
def create_step_net(self):
step_unit = core.Net.create()
x_fc_op = Operator("mul", X="x", Y="W", Out="Wx")
h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum")
sig_op = Operator("sigmoid", X="sum", Y="h@state")
for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
step_unit.append_op(op)
step_unit.complete_add_op(True)
self.forward_op.set_step_unit(step_unit)
def create_global_variables(self):
# create inlink
x_tensor = create_tensor(self.scope, "x", [num_sents, input_dim],
self.py.x)
x_tensor.set_lod(lod_py)
create_tensor(self.scope, "W", [input_dim, input_dim], self.py.W)
create_tensor(self.scope, "U", [input_dim, input_dim], self.py.U)
create_tensor(self.scope, "h_boot", [num_sents, input_dim],
self.py.h_boot)
self.scope.var("step_scopes")
self.scope.var("h@state")
def test_grad(self):
self.scope = core.Scope()
self.create_forward_op()
self.create_global_variables()
self.create_step_net()
self.create_gradient_op()
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -92,5 +92,33 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseOp): ...@@ -92,5 +92,33 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseOp):
} }
class TestElementwiseAddOp_rowwise_add_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_add"
self.inputs = {
'X': np.random.rand(2, 3, 4).astype(np.float32),
'Y': np.random.rand(3, 4).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 3, 4)
}
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_add"
self.inputs = {
'X': np.random.rand(2, 1).astype(np.float32),
'Y': np.random.rand(1).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 1)
}
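The `axis` attribute in these two cases tells the op where `Y`'s dimensions line up inside `X`'s; a quick numpy check of the first case:

import numpy as np

x = np.random.rand(2, 3, 4).astype(np.float32)
y = np.random.rand(3, 4).astype(np.float32)
# axis=1: y's shape (3, 4) aligns with x's dims starting at index 1, i.e. it broadcasts as (1, 3, 4)
out = x + y.reshape(1, 3, 4)
assert out.shape == (2, 3, 4)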
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -5,6 +5,7 @@ import numpy as np ...@@ -5,6 +5,7 @@ import numpy as np
class TestFeedFetch(unittest.TestCase): class TestFeedFetch(unittest.TestCase):
def test_feed_fetch(self): def test_feed_fetch(self):
scope = core.Scope()
place = core.CPUPlace() place = core.CPUPlace()
input_array = np.ones((4, 4, 6)).astype("float32") input_array = np.ones((4, 4, 6)).astype("float32")
input_array[0, 0, 0] = 3 input_array[0, 0, 0] = 3
...@@ -12,9 +13,9 @@ class TestFeedFetch(unittest.TestCase): ...@@ -12,9 +13,9 @@ class TestFeedFetch(unittest.TestCase):
input_tensor = core.LoDTensor([[0, 2, 4]]) input_tensor = core.LoDTensor([[0, 2, 4]])
input_tensor.set(input_array, place) input_tensor.set(input_array, place)
core.set_feed_variable_float(input_tensor, "feed", 0) core.set_feed_variable(scope, input_tensor, "feed", 0)
output_tensor = core.get_fetch_variable("feed", 0) output_tensor = core.get_fetch_variable(scope, "feed", 0)
output_lod = output_tensor.lod() output_lod = output_tensor.lod()
self.assertEqual(0, output_lod[0][0]) self.assertEqual(0, output_lod[0][0])
......
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program, g_program
from paddle.v2.framework.executor import Executor
import numpy as np
init_program = Program()
program = Program()
x = layers.data(
name='x',
shape=[13],
data_type='float32',
program=program,
init_program=init_program)
y_predict = layers.fc(input=x,
size=1,
act=None,
program=program,
init_program=init_program)
y = layers.data(
name='y',
shape=[1],
data_type='float32',
program=program,
init_program=init_program)
cost = layers.square_error_cost(
input=y_predict, label=y, program=program, init_program=init_program)
avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
BATCH_SIZE = 20
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.uci_housing.train(), buf_size=500),
batch_size=BATCH_SIZE)
place = core.CPUPlace()
exe = Executor(place)
exe.run(init_program, feed={}, fetch_list=[])
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for data in train_reader():
x_data = np.array(map(lambda x: x[0], data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("float32")
tensor_x = core.LoDTensor()
tensor_x.set(x_data, place)
# print tensor_x.get_dims()
tensor_y = core.LoDTensor()
tensor_y.set(y_data, place)
# print tensor_y.get_dims()
outs = exe.run(program,
feed={'x': tensor_x,
'y': tensor_y},
fetch_list=[avg_cost])
out = np.array(outs[0])
if out[0] < 10.0:
exit(0) # if avg cost less than 10.0, we think our code is good.
exit(1)
import unittest
import numpy as np
from op_test import OpTest
class TestIncrementOpPositiveStep(OpTest):
"""Test increment op with positive step
"""
def setUp(self):
self.op_type = "increment"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.attrs = {'step': 14.8}
self.outputs = {'Out': self.inputs['X'] + self.attrs['step']}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestIncrementOpNegativeStep(OpTest):
"""Test increment op with negative step
"""
def setUp(self):
self.op_type = "increment"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.attrs = {'step': -3.8}
self.outputs = {'Out': self.inputs['X'] + self.attrs['step']}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
unittest.main()
...@@ -3,7 +3,7 @@ import numpy as np ...@@ -3,7 +3,7 @@ import numpy as np
from op_test import OpTest from op_test import OpTest
class TestMomentumOp(OpTest): class TestMomentumOp1(OpTest):
def setUp(self): def setUp(self):
self.op_type = "momentum" self.op_type = "momentum"
...@@ -12,6 +12,7 @@ class TestMomentumOp(OpTest): ...@@ -12,6 +12,7 @@ class TestMomentumOp(OpTest):
velocity = np.zeros((123, 321)).astype("float32") velocity = np.zeros((123, 321)).astype("float32")
learning_rate = np.array([0.001]).astype("float32") learning_rate = np.array([0.001]).astype("float32")
mu = 0.0001 mu = 0.0001
use_nesterov = False
self.inputs = { self.inputs = {
'Param': param, 'Param': param,
...@@ -23,7 +24,47 @@ class TestMomentumOp(OpTest): ...@@ -23,7 +24,47 @@ class TestMomentumOp(OpTest):
self.attrs = {'mu': mu} self.attrs = {'mu': mu}
velocity_out = mu * velocity + grad velocity_out = mu * velocity + grad
param_out = param - learning_rate * velocity_out if use_nesterov:
param_out = param - grad * learning_rate + \
velocity_out * mu * learning_rate
else:
param_out = param - learning_rate * velocity_out
self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}
def test_check_output(self):
self.check_output()
class TestMomentumOp2(OpTest):
'''Test Momentum with the Nesterov variant enabled (use_nesterov=True)
'''
def setUp(self):
self.op_type = "momentum"
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
velocity = np.zeros((123, 321)).astype("float32")
learning_rate = np.array([0.001]).astype("float32")
mu = 0.0001
use_nesterov = True
self.inputs = {
'Param': param,
'Grad': grad,
'Velocity': velocity,
'LearningRate': learning_rate
}
self.attrs = {'mu': mu, 'useNesterov': use_nesterov}
velocity_out = mu * velocity + grad
if use_nesterov:
param_out = param - grad * learning_rate + \
velocity_out * mu * learning_rate
else:
param_out = param - learning_rate * velocity_out
self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}
......
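The useNesterov branch in these tests differs from plain momentum only in the parameter step; as a standalone numpy sketch of that branch (plain momentum is as in the earlier sketch):

import numpy as np

def nesterov_momentum_update(param, grad, velocity, lr, mu):
    velocity_out = mu * velocity + grad
    # look-ahead step: move along the raw gradient plus the scaled new velocity
    param_out = param - grad * lr + velocity_out * mu * lr
    return param_out, velocity_out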
...@@ -6,7 +6,7 @@ import paddle.v2.framework.optimizer as optimizer ...@@ -6,7 +6,7 @@ import paddle.v2.framework.optimizer as optimizer
class TestOptimizer(unittest.TestCase): class TestOptimizer(unittest.TestCase):
def test_sgd_optimizer(self): def test_sgd_optimizer(self):
program = framework.g_program program = framework.Program()
block = program.global_block() block = program.global_block()
mul_x = block.create_parameter( mul_x = block.create_parameter(
dtype="float32", shape=[5, 10], lod_level=0, name="mul.x") dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
...@@ -14,7 +14,7 @@ class TestOptimizer(unittest.TestCase): ...@@ -14,7 +14,7 @@ class TestOptimizer(unittest.TestCase):
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var( mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
mul_op = block.append_op( block.append_op(
type="mul", type="mul",
inputs={"X": mul_x, inputs={"X": mul_x,
"Y": mul_y}, "Y": mul_y},
...@@ -27,5 +27,47 @@ class TestOptimizer(unittest.TestCase): ...@@ -27,5 +27,47 @@ class TestOptimizer(unittest.TestCase):
self.assertEqual(sgd_op.type, "sgd") self.assertEqual(sgd_op.type, "sgd")
class TestMomentumOptimizer(unittest.TestCase):
class MockMomentum(optimizer.MomentumOptimizer):
def get_accumulators(self):
return self._accumulators
def get_velocity_str(self):
return self._velocity_acc_str
def test_momentum_optimizer(self):
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
momentum_optimizer = self.MockMomentum(learning_rate=0.01, momentum=0.2)
params_grads = momentum_optimizer.create_backward_pass(mul_out)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
opts = momentum_optimizer.create_optimization_pass(params_grads,
mul_out)
self.assertEqual(len(opts), 1)
sgd_op = opts[0]
self.assertEqual(sgd_op.type, "momentum")
# Check accumulators
accumulators = momentum_optimizer.get_accumulators()
self.assertEqual(len(accumulators), 1)
self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
self.assertEqual(len(velocity_acc), 1)
self.assertTrue(mul_x.name in velocity_acc)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program, g_program
from paddle.v2.framework.executor import Executor
import numpy as np
init_program = Program()
program = Program()
images = layers.data(
name='pixel',
shape=[1, 28, 28],
data_type='float32',
program=program,
init_program=init_program)
label = layers.data(
name='label',
shape=[1],
data_type='int32',
program=program,
init_program=init_program)
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu",
program=program,
init_program=init_program)
conv_pool_2 = nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu",
program=program,
init_program=init_program)
predict = layers.fc(input=conv_pool_2,
size=10,
act="softmax",
program=program,
init_program=init_program)
cost = layers.cross_entropy(
input=predict, label=label, program=program, init_program=init_program)
avg_cost = layers.mean(x=cost, program=program)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
BATCH_SIZE = 50
PASS_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
place = core.CPUPlace()
exe = Executor(place)
exe.run(init_program, feed={}, fetch_list=[])
for pass_id in range(PASS_NUM):
count = 0
for data in train_reader():
img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("int32")
y_data = y_data.reshape([BATCH_SIZE, 1])
tensor_img = core.LoDTensor()
tensor_y = core.LoDTensor()
tensor_img.set(img_data, place)
tensor_y.set(y_data, place)
outs = exe.run(program,
feed={"pixel": tensor_img,
"label": tensor_y},
fetch_list=[avg_cost])
loss = np.array(outs[0])
if loss < 10.0:
exit(0) # if avg cost less than 10.0, we think our code is good.
exit(1)
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program, g_program
from paddle.v2.framework.executor import Executor
import numpy as np
init_program = Program()
program = Program()
image = layers.data(
name='x',
shape=[784],
data_type='float32',
program=program,
init_program=init_program)
hidden1 = layers.fc(input=image,
size=128,
act='relu',
program=program,
init_program=init_program)
hidden2 = layers.fc(input=hidden1,
size=64,
act='relu',
program=program,
init_program=init_program)
predict = layers.fc(input=hidden2,
size=10,
act='softmax',
program=program,
init_program=init_program)
label = layers.data(
name='y',
shape=[1],
data_type='int32',
program=program,
init_program=init_program)
cost = layers.cross_entropy(
input=predict, label=label, program=program, init_program=init_program)
avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
BATCH_SIZE = 128
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=8192),
batch_size=BATCH_SIZE)
place = core.CPUPlace()
exe = Executor(place)
exe.run(init_program, feed={}, fetch_list=[])
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for data in train_reader():
x_data = np.array(map(lambda x: x[0], data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("int32")
y_data = np.expand_dims(y_data, axis=1)
tensor_x = core.LoDTensor()
tensor_x.set(x_data, place)
tensor_y = core.LoDTensor()
tensor_y.set(y_data, place)
outs = exe.run(program,
feed={'x': tensor_x,
'y': tensor_y},
fetch_list=[avg_cost])
out = np.array(outs[0])
if out[0] < 5.0:
exit(0) # if avg cost less than 5.0, we think our code is good.
exit(1)
...@@ -132,15 +132,15 @@ class RecurrentOpTest(unittest.TestCase): ...@@ -132,15 +132,15 @@ class RecurrentOpTest(unittest.TestCase):
# create RNNOp # create RNNOp
self.rnnop = RecurrentOp( self.rnnop = RecurrentOp(
# inputs # inputs
inlinks=["x"], inputs=["x"],
boot_memories=["h_boot"], initial_states=["h_boot"],
step_net="stepnet", step_net="stepnet",
# outputs # outputs
outlinks=["h@mem"], outputs=["h@mem"],
step_scopes="step_scopes", step_scopes="step_scopes",
# attributes # attributes
pre_memories=["h@pre"], ex_states=["h@pre"],
memories=["h@mem"]) states=["h@mem"])
def create_step_net(self): def create_step_net(self):
stepnet = core.Net.create() stepnet = core.Net.create()
...@@ -169,15 +169,15 @@ class RecurrentGradientOpTest(unittest.TestCase): ...@@ -169,15 +169,15 @@ class RecurrentGradientOpTest(unittest.TestCase):
def create_forward_op(self): def create_forward_op(self):
self.forward_op = RecurrentOp( self.forward_op = RecurrentOp(
# inputs # inputs
inlinks=["x"], inputs=["x"],
boot_memories=["h_boot"], initial_states=["h_boot"],
step_net="stepnet", step_net="stepnet",
# outputs # outputs
outlinks=["h"], outputs=["h"],
step_scopes="step_scopes", step_scopes="step_scopes",
# attributes # attributes
pre_memories=["h@pre"], ex_states=["h@pre"],
memories=["h@alias"]) states=["h@alias"])
# create a stepnet for RNN # create a stepnet for RNN
stepnet = core.Net.create() stepnet = core.Net.create()
......
...@@ -46,7 +46,7 @@ class TestRmspropOp1(OpTest): ...@@ -46,7 +46,7 @@ class TestRmspropOp1(OpTest):
class TestRmspropOp2(OpTest): class TestRmspropOp2(OpTest):
'''Test RMSProp with defaukt values for attributes '''Test RMSProp with default values for attributes
''' '''
def setUp(self): def setUp(self):
......
...@@ -19,7 +19,7 @@ class TestUniformRandomOp(unittest.TestCase): ...@@ -19,7 +19,7 @@ class TestUniformRandomOp(unittest.TestCase):
op = Operator( op = Operator(
"uniform_random", "uniform_random",
Out='X', Out='X',
dims=[1000, 784], shape=[1000, 784],
min=-5.0, min=-5.0,
max=10.0, max=10.0,
seed=10) seed=10)
......