diff --git a/doc/fluid/howto/optimization/timeline_cn.md b/doc/fluid/howto/optimization/timeline_cn.md
index 5d061e1c00d2ca0194153730a39486b8357fa5b0..faf39f276dbddcd4961407ba2d082c9826051cbe 100644
--- a/doc/fluid/howto/optimization/timeline_cn.md
+++ b/doc/fluid/howto/optimization/timeline_cn.md
@@ -1,21 +1,27 @@
# 如何使用timeline工具做性能分析
-1. 在训练的主循环外加上`with profiler.profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。
+1. 在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.stop_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。
**提示:**
请不要在timeline记录信息时运行太多次迭代,因为timeline中的记录数量和迭代次数是成正比的。
```python
- with profiler.profiler('All', 'total', '/tmp/profile') as prof:
- for pass_id in range(pass_num):
- for batch_id, data in enumerate(train_reader()):
- exe.run(fluid.default_main_program(),
- feed=feeder.feed(data),
- fetch_list=[])
+ for pass_id in range(pass_num):
+ for batch_id, data in enumerate(train_reader()):
+ if pass_id == 0 and batch_id == 5:
+ profiler.start_profiler("All")
+ elif pass_id == 0 and batch_id == 10:
+ profiler.stop_profiler("total", "/tmp/profile")
+ exe.run(fluid.default_main_program(),
+ feed=feeder.feed(data),
+ fetch_list=[])
...
```
1. 运行`python paddle/tools/timeline.py`来处理`/tmp/profile`,这个程序默认会生成一个`/tmp/timeline`文件,你也可以用命令行参数来修改这个路径,请参考[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py)。
+```bash
+python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline
+```
1. 打开chrome浏览器,访问,用`load`按钮来加载生成的`timeline`文件。
diff --git a/doc/fluid/howto/optimization/timeline_en.md b/doc/fluid/howto/optimization/timeline_en.md
index 96481ae2a6e4442d40803f8d5361e5f942502df3..6f963c6b4da6967fb2f493ada917a4b08917fa4c 100644
--- a/doc/fluid/howto/optimization/timeline_en.md
+++ b/doc/fluid/howto/optimization/timeline_en.md
@@ -1,15 +1,17 @@
# how to use timeline tool to do profile
-1. Add `with profiler.profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number.
+1. Add `profiler.start_profiler(...)` and `profiler.stop_profiler(...)` to the main training loop. After running, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when using the profiler to record timeline information, as the profile record will grow with the batch number.
```python
- with profiler.profiler('All', 'total', '/tmp/profile') as prof:
- for pass_id in range(pass_num):
- for batch_id, data in enumerate(train_reader()):
- exe.run(fluid.default_main_program(),
- feed=feeder.feed(data),
- fetch_list=[],
- use_program_cache=True)
+ for pass_id in range(pass_num):
+ for batch_id, data in enumerate(train_reader()):
+ if pass_id == 0 and batch_id == 5:
+ profiler.start_profiler("All")
+ elif pass_id == 0 and batch_id == 10:
+ profiler.stop_profiler("total", "/tmp/profile")
+ exe.run(fluid.default_main_program(),
+ feed=feeder.feed(data),
+ fetch_list=[])
...
```
@@ -17,6 +19,10 @@
file `/tmp/timeline` by default. You can change the path by cmd parameter, please take a look at
[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py) for details.
+```bash
+python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline
+```
+
1. Open chrome and visit , use `load` button to load the generated `timeline` file.
![chrome tracing](./tracing.jpeg)
diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc
index 700c73c745bad72637d77385f5cd38c494501c86..bf493a3fa44e48deec734250d04b2a413c3ed9da 100644
--- a/paddle/fluid/framework/details/all_reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -17,6 +17,7 @@
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/reduce_and_gather.h"
#include "paddle/fluid/framework/details/variable_visitor.h"
+#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace framework {
@@ -45,6 +46,7 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
#endif
void AllReduceOpHandle::RunImpl() {
+ platform::RecordEvent r("all_reduce", nullptr);
if (NoDummyInputSize() == 1) {
return; // No need to all reduce when GPU count = 1;
} else {
diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc
index 7160e346dad0615e2fd32b70c096880af0359e1a..68bdfbaf52120d19d05d156529626f42adda630d 100644
--- a/paddle/fluid/framework/details/reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/reduce_op_handle.cc
@@ -16,12 +16,14 @@
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/reduce_and_gather.h"
#include "paddle/fluid/framework/details/variable_visitor.h"
+#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace framework {
namespace details {
void ReduceOpHandle::RunImpl() {
+ platform::RecordEvent r("reduce", nullptr);
if (places_.size() == 1) return;
// the input and output may have dummy var.
auto in_var_handles = DynamicCast(inputs_);
diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
index 1d80bab90f513139f807b57258177c6b2ac53ac0..5bd974d6b789a2f085c0a69de5e133187342f587 100644
--- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
@@ -17,6 +17,7 @@
#include
#include
#include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace framework {
@@ -62,6 +63,7 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
eptr = std::current_exception();
}
+ platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun", nullptr);
drop_scope_counter_ += 1;
if (!fetch_tensors.empty() ||
drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) {
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
index e556c84b0219eba2b92c456c205e03947171626b..0eaf9a9c951991a5775604eb8d0e7535f81a4ae2 100644
--- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
@@ -15,6 +15,7 @@
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/ssa_graph_builder.h"
+#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace framework {
@@ -34,6 +35,8 @@ ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor(
FeedFetchList ThreadedSSAGraphExecutor::Run(
const std::vector &fetch_tensors) {
+ std::unique_ptr event(
+ new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare", nullptr));
std::unordered_map pending_ops;
std::unordered_set pending_vars;
BlockingQueue ready_vars;
@@ -84,6 +87,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
// Clean run context
run_op_futures_.clear();
exception_holder_.Clear();
+ event.reset(nullptr);
// Step 3. Execution
while (!pending_vars.empty()) {
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index cdac00739bc48648b41751e644a953d0d310ffbf..0c8acf71bfa0814e66560258ad6131c743ebc81b 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -136,6 +136,8 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
platform::SetDeviceId(dev_id);
#endif
}
+ platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+ platform::RecordEvent record_event(Type(), pool.Get(place));
RunImpl(scope, place);
VLOG(10) << "+ " << DebugStringEx(&scope);
}
@@ -639,9 +641,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
auto* dev_ctx = pool.Get(place);
- // For profiling, don't move out of this function because that will result
- // in the failure of multi-GPU profiling.
- platform::RecordEvent record_event(Type(), dev_ctx);
// check if op[type] has kernel registered.
auto& all_op_kernels = AllOpKernels();
auto kernels_iter = all_op_kernels.find(type_);
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index 259d79bedbf664f52b1189ca71567665a6d91180..08d0f493ab30d92a121d089d9003bc575429b4dd 100644
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -74,9 +74,10 @@ if (WITH_ANAKIN) # only needed in CI
target_link_libraries(inference_anakin_api anakin anakin_saber_common)
target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common)
if (WITH_TESTING)
- cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc
- ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
- DEPS inference_anakin_api_shared)
- target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+    # This test is unstable; disable it for now.
+ #cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc
+ #ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
+ #DEPS inference_anakin_api_shared)
+ #target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
endif(WITH_TESTING)
endif()
diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc
index bcb3e63ed7dbc775c1de6c4522f0548ea48a6cf0..dc7ef664958238ddbd48745bd59cc7db28e49f5b 100644
--- a/paddle/fluid/operators/feed_op.cc
+++ b/paddle/fluid/operators/feed_op.cc
@@ -31,7 +31,6 @@ class FeedOp : public framework::OperatorBase {
const platform::Place &place) const override {
// get device context from pool
auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place);
- platform::RecordEvent record_event(Type(), dev_ctx);
auto feed_var_name = Input("X");
auto *feed_var = scope.FindVar(feed_var_name);
diff --git a/paddle/fluid/operators/fetch_barrier_op.cc b/paddle/fluid/operators/fetch_barrier_op.cc
index 680fde19eefe57475b7526ebc29d4ff977a16977..d9cd956dfdff3d009d38ee5088f5396080580483 100644
--- a/paddle/fluid/operators/fetch_barrier_op.cc
+++ b/paddle/fluid/operators/fetch_barrier_op.cc
@@ -36,12 +36,6 @@ class FetchBarrierOp : public framework::OperatorBase {
void RunImpl(const framework::Scope& scope,
const platform::Place& place) const override {
std::vector eps = Attr>("endpoints");
-
- platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
- auto& ctx = *pool.Get(place);
- // For profiling
- platform::RecordEvent record_event(Type(), &ctx);
-
distributed::RPCClient* rpc_client =
distributed::RPCClient::GetInstance();
diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc
index 1640a2a22c69a0e3ab81a2889d6105b2cf4162b7..c197b45e8196a47def6465128e8ca39d8daefed6 100644
--- a/paddle/fluid/operators/fetch_op.cc
+++ b/paddle/fluid/operators/fetch_op.cc
@@ -30,9 +30,6 @@ class FetchOp : public framework::OperatorBase {
private:
void RunImpl(const framework::Scope &scope,
const platform::Place &place) const override {
- platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
- platform::RecordEvent record_event(Type(), pool.Get(place));
-
auto fetch_var_name = Input("X");
auto *fetch_var = scope.FindVar(fetch_var_name);
PADDLE_ENFORCE(fetch_var != nullptr,
diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc
index ac35cf0b89bfaa0c0f8e64445f18a3bbd478e70a..27e26cb1b5c1e831f05dac299489628b92eaa58c 100644
--- a/paddle/fluid/operators/load_op.cc
+++ b/paddle/fluid/operators/load_op.cc
@@ -31,9 +31,6 @@ class LoadOp : public framework::OperatorBase {
private:
void RunImpl(const framework::Scope &scope,
const platform::Place &place) const override {
- auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place);
- platform::RecordEvent record_event(Type(), dev_ctx);
-
// FIXME(yuyang18): We save variable to local file now, but we should change
// it to save an output stream.
auto filename = Attr("file_path");
diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc
index 3e8f3ec5c5cd683343bcbdfc2388bd37c25e00f9..d77b095c5d783a2a9fab87eb8b458117a6a3d225 100644
--- a/paddle/fluid/operators/lookup_table_op.cc
+++ b/paddle/fluid/operators/lookup_table_op.cc
@@ -32,11 +32,16 @@ class LookupTableOp : public framework::OperatorWithKernel {
auto table_dims = ctx->GetInputDim("W");
auto ids_dims = ctx->GetInputDim("Ids");
+ int ids_rank = ids_dims.size();
- PADDLE_ENFORCE_EQ(ids_dims.size(), 2);
- PADDLE_ENFORCE_EQ(ids_dims[1], 1);
+ PADDLE_ENFORCE_EQ(table_dims.size(), 2);
+ PADDLE_ENFORCE_EQ(ids_dims[ids_rank - 1], 1,
+ "The last dimension of the 'Ids' tensor must be 1.");
- ctx->SetOutputDim("Out", {ids_dims[0], table_dims[1]});
+ auto output_dims =
+ framework::vectorize(framework::slice_ddim(ids_dims, 0, ids_rank - 1));
+ output_dims.push_back(table_dims[1]);
+ ctx->SetOutputDim("Out", framework::make_ddim(output_dims));
if (ctx->GetOutputsVarType("Out")[0] ==
framework::proto::VarType::LOD_TENSOR) {
@@ -61,8 +66,7 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("Ids",
"An input with type int32 or int64 "
"contains the ids to be looked up in W. "
- "Ids must be a column vector with rank = 2. "
- "The 2nd dimension size must be 1.");
+ "The last dimension size must be 1.");
AddOutput("Out", "The lookup results, which have the same type as W.");
AddAttr("is_sparse",
"(boolean, default false) "
diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu
index 27483372b93a850d313445386c7973838c4a0710..74823dab09cac358f647c074ac2f2ee2fed17e55 100644
--- a/paddle/fluid/operators/lookup_table_op.cu
+++ b/paddle/fluid/operators/lookup_table_op.cu
@@ -118,28 +118,31 @@ class LookupTableGradCUDAKernel : public framework::OpKernel {
auto *d_table = context.Output(framework::GradVarName("W"));
auto *ids_data = ids->data();
- auto ids_dim = ids->dims();
+ int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector new_rows;
- new_rows.resize(ids_dim[0]);
+ new_rows.resize(ids_num);
auto gpu_place = boost::get(context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(platform::CPUPlace(),
new_rows.CUDAMutableData(context.GetPlace()), gpu_place,
- ids_data, ids_dim[0] * sizeof(int64_t), stream);
+ ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
- d_table_value->Resize({ids_dim[0], table->dims()[1]});
+ d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data(context.GetPlace());
auto *d_table_data = d_table_value->data();
auto *d_output_data = d_output->data();
- PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims());
+ auto d_output_dims = d_output->dims();
+ PADDLE_ENFORCE_EQ(
+ d_table_value->dims(),
+ framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h
index c9f074ca0e8dafb374dc9368165df5af5053a6b8..f5c10ced8305b64c6386c5051804f8c9a8f71802 100644
--- a/paddle/fluid/operators/lookup_table_op.h
+++ b/paddle/fluid/operators/lookup_table_op.h
@@ -109,17 +109,17 @@ class LookupTableGradKernel : public framework::OpKernel {
auto *d_table = context.Output(framework::GradVarName("W"));
auto *ids_data = ids->data();
- auto ids_dim = ids->dims();
+ int64_t ids_num = ids->numel();
framework::Vector new_rows;
- new_rows.reserve(ids_dim[0]);
- for (int64_t i = 0; i < ids_dim[0]; i++) {
+ new_rows.reserve(ids_num);
+ for (int64_t i = 0; i < ids_num; i++) {
new_rows.push_back(ids_data[i]);
}
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
- d_table_value->Resize({ids_dim[0], table_dim[1]});
+ d_table_value->Resize({ids_num, table_dim[1]});
d_table_value->mutable_data(context.GetPlace());
d_table->set_height(table_dim[0]);
@@ -127,7 +127,10 @@ class LookupTableGradKernel : public framework::OpKernel {
auto *d_output_data = d_output->data();
auto *d_table_data = d_table_value->data();
- PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims());
+ auto d_output_dims = d_output->dims();
+ PADDLE_ENFORCE_EQ(
+ d_table_value->dims(),
+ framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1));
memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel());
} else {
auto *ids = context.Input("Ids");
@@ -135,10 +138,9 @@ class LookupTableGradKernel : public framework::OpKernel {
auto *d_table = context.Output(framework::GradVarName("W"));
auto *ids_data = ids->data();
- auto ids_dim = ids->dims();
int N = table_dim[0];
- int D = d_output->dims()[1];
+ int D = table_dim[1];
auto *d_output_data = d_output->data();
auto *d_table_data = d_table->mutable_data(context.GetPlace());
diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc
index c9744db3d0654ef63357963d9a9a3cb946f56e2d..916cdad3fd288d1f3ffb19bc769ab827dd1e9103 100644
--- a/paddle/fluid/operators/parallel_do_op.cc
+++ b/paddle/fluid/operators/parallel_do_op.cc
@@ -18,7 +18,6 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/threadpool.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
-#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace operators {
@@ -166,8 +165,6 @@ class ParallelDoOp : public framework::OperatorBase {
workers.emplace_back(
framework::Async([program, cur_scope, place, block, place_idx] {
- // Give the thread an id to distinguish parallel block with same id.
- platform::RecordThread rt(static_cast(place_idx) + 1);
framework::Executor executor(place);
executor.Run(*program, cur_scope, block->ID(),
false /*create_local_scope*/);
@@ -244,8 +241,6 @@ class ParallelDoGradOp : public framework::OperatorBase {
// execute
workers.emplace_back(
framework::Async([program, cur_scope, place, block, i] {
- // Give the thread an id to distinguish parallel block with same id.
- platform::RecordThread rt(static_cast(i) + 1);
framework::Executor executor(place);
executor.Run(*program, cur_scope, block->ID(),
false /*create_local_scope*/);
diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc
index 1ba684014904e61a86bebacd7d29d7e10d313092..4a6ce938a5f337d035b21f562d46daf606236db0 100644
--- a/paddle/fluid/operators/recv_op.cc
+++ b/paddle/fluid/operators/recv_op.cc
@@ -40,8 +40,6 @@ class RecvOp : public framework::OperatorBase {
platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
auto& ctx = *pool.Get(place);
- // For profiling
- platform::RecordEvent record_event(Type(), &ctx);
distributed::RPCClient* rpc_client =
distributed::RPCClient::GetInstance();
diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc
index d7f8e994afd7e656bd5a9dd7c5ab45f0d52fe88b..1866a86048acbefadcb4d82cd6309cd16f0352d6 100644
--- a/paddle/fluid/operators/send_barrier_op.cc
+++ b/paddle/fluid/operators/send_barrier_op.cc
@@ -39,11 +39,6 @@ class SendBarrierOp : public framework::OperatorBase {
std::vector eps = Attr>("endpoints");
bool sync_mode = Attr("sync_mode");
- platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
- auto& ctx = *pool.Get(place);
- // For profiling
- platform::RecordEvent record_event(Type(), &ctx);
-
distributed::RPCClient* rpc_client =
distributed::RPCClient::GetInstance();
diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc
index 829f310d4233c01a7fbb9ccf7427f6e47ce8d384..3cd42f2d059532b7090e66ce21de8e5cb014adf1 100644
--- a/paddle/fluid/operators/send_op.cc
+++ b/paddle/fluid/operators/send_op.cc
@@ -42,9 +42,6 @@ class SendOp : public framework::OperatorBase {
platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
auto& ctx = *pool.Get(place);
- // For profiling
- platform::RecordEvent record_event(Type(), &ctx);
-
distributed::RPCClient* rpc_client =
distributed::RPCClient::GetInstance();
diff --git a/paddle/fluid/operators/softmax_cudnn_op.cu.cc b/paddle/fluid/operators/softmax_cudnn_op.cu.cc
index 5596fa0648ccc151bc0d11de9c556599428a8d71..2bdb23e999621b10799b5163f326bc4b66a437e6 100644
--- a/paddle/fluid/operators/softmax_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/softmax_cudnn_op.cu.cc
@@ -30,8 +30,16 @@ class SoftmaxCUDNNKernel : public framework::OpKernel {
// allocate memory on device.
Out->mutable_data(context.GetPlace());
+ auto dims = X->dims();
+ auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+ framework::LoDTensor flattened_x;
+ framework::LoDTensor flattened_out;
+ flattened_x.ShareDataWith(*X).Resize(flattened_dims);
+ flattened_out.ShareDataWith(*Out).Resize(flattened_dims);
+
math::SoftmaxCUDNNFunctor()(
- context.template device_context(), X, Out);
+ context.template device_context(),
+ &flattened_x, &flattened_out);
}
};
@@ -46,9 +54,18 @@ class SoftmaxGradCUDNNKernel : public framework::OpKernel {
// allocate memory on device.
dX->mutable_data(context.GetPlace());
+ auto dims = Out->dims();
+ auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+ framework::LoDTensor flattened_out;
+ framework::LoDTensor flattened_d_out;
+ framework::LoDTensor flattened_d_x;
+ flattened_out.ShareDataWith(*Out).Resize(flattened_dims);
+ flattened_d_out.ShareDataWith(*dOut).Resize(flattened_dims);
+ flattened_d_x.ShareDataWith(*dX).Resize(flattened_dims);
+
math::SoftmaxGradCUDNNFunctor()(
- context.template device_context(), Out,
- dOut, dX);
+ context.template device_context(),
+ &flattened_out, &flattened_d_out, &flattened_d_x);
}
};
diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc
index 6668e6b9e917eea7ba4a80ac78917b73eb827208..01819f53e3ab0973f6140c5a81f18f954b6a0376 100644
--- a/paddle/fluid/operators/softmax_mkldnn_op.cc
+++ b/paddle/fluid/operators/softmax_mkldnn_op.cc
@@ -26,9 +26,9 @@ using paddle::platform::MKLDNNMemDesc;
using mkldnn::memory; // Note: paddle has also "memory" namespace
using mkldnn::primitive;
-using mkldnn::softmax_forward;
-using mkldnn::softmax_backward;
using mkldnn::prop_kind;
+using mkldnn::softmax_backward;
+using mkldnn::softmax_forward;
using mkldnn::stream;
using platform::to_void_cast;
@@ -113,17 +113,27 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel {
auto mkldnn_engine = dev_ctx.GetEngine();
const Tensor* input = ctx.Input("X");
Tensor* output = ctx.Output("Out");
- PADDLE_ENFORCE(input->dims().size() == 2UL,
- "The input of softmax op must be a 2D matrix.");
- const T* input_data = input->data();
- // allocate memory for output
- T* output_data = output->mutable_data(ctx.GetPlace());
- std::vector src_tz = paddle::framework::vectorize2int(input->dims());
- std::vector dst_tz = paddle::framework::vectorize2int(output->dims());
- // MKL-DNN does support softmax over selected axis. Having 2D Tensor,
- // we will make normalization after final eg. axis: 1
- PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])),
- "Softmax input and output dimensions should match");
+ PADDLE_ENFORCE_EQ(
+ input->dims(), output->dims(),
+ "The shape of softmax's input and output must be identical.");
+
+ // make sure 'output' holds memory, which will be shared by
+ // 'flattened_output' later.
+ output->mutable_data(ctx.GetPlace());
+
+ // flatten input and output to 2-D matrixs
+ auto dims = input->dims(); // input and output share the same shape
+ auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+ framework::Tensor flattened_input;
+ framework::Tensor flattened_output;
+ flattened_input.ShareDataWith(*input).Resize(flattened_dims);
+ flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+
+ const T* input_data = flattened_input.data();
+ T* output_data = flattened_output.mutable_data(ctx.GetPlace());
+
+ std::vector src_tz = paddle::framework::vectorize2int(flattened_dims);
+ std::vector dst_tz = src_tz;
// Same memory descriptor to be used for input and output
memory::dims softmax_tz = {src_tz[0], src_tz[1]};
// Generate keys for storing/retriving primitives for this operator
@@ -174,23 +184,34 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel {
auto& dev_ctx = ctx.template device_context();
auto mkldnn_engine = dev_ctx.GetEngine();
const Tensor* output = ctx.Input("Out");
- const T* dst_data = output->data();
-
auto* dout = ctx.template Input(framework::GradVarName("Out"));
- const auto* diff_dst_ptr = dout->template data();
-
auto* dx =
ctx.template Output(framework::GradVarName("X"));
- T* diff_src_ptr = dx->template mutable_data(ctx.GetPlace());
- std::vector dst_tz = paddle::framework::vectorize2int(output->dims());
+ PADDLE_ENFORCE_EQ(
+ dout->dims(), dx->dims(),
+ "The shape of softmax_grad's input and output must be identical.");
+
+ // make sure 'dx' holds memory, which will be shared by 'flattened_dx'
+ // later.
+ dx->template mutable_data(ctx.GetPlace());
+
+ auto dims = dout->dims(); // input and output share the same shape
+ auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+ framework::Tensor flattened_output;
+ framework::Tensor flattened_dout;
+ framework::Tensor flattened_dx;
+ flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+ flattened_dout.ShareDataWith(*dout).Resize(flattened_dims);
+ flattened_dx.ShareDataWith(*dx).Resize(flattened_dims);
+
+ const T* dst_data = flattened_output.data();
+ const T* diff_dst_ptr = flattened_dout.template data();
+ T* diff_src_ptr = flattened_dx.template mutable_data(ctx.GetPlace());
+
+ std::vector dst_tz = paddle::framework::vectorize2int(flattened_dims);
std::vector src_tz(dst_tz);
- PADDLE_ENFORCE(output->dims().size() == 2UL,
- "The input of softmax op must be a 2D matrix.");
- // MKL-DNN does support softmax over selected axis. Having 2D Tensor,
- // we will make normalization after final eg. axis: 1
- PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])),
- "Softmax input and output dimensions should match");
+
// Same memory descriptor to be used for input and output
memory::dims softmax_tz = {src_tz[0], src_tz[1]};
// Currently only supports NC data format
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index fefc7125b4de7274589670d29be4511469d5064a..bb081238820b9ee3ae095442d21cfce11f7b41e5 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -37,10 +37,7 @@ class SoftmaxOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SoftmaxOp should not be null.");
- auto x_dims = ctx->GetInputDim("X");
- PADDLE_ENFORCE(x_dims.size() == 2UL,
- "The input of softmax op must be a matrix.");
- ctx->SetOutputDim("Out", x_dims);
+ ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
ctx->ShareLoD("X", /*->*/ "Out");
}
@@ -81,8 +78,8 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
- "The input tensor of softmax. "
- "2-D with shape [batch_size, input_feature_dimensions].");
+ "The input tensor of softmax, "
+ "whose last dimension is the input_feature_dimensions.");
AddOutput("Out", "The normalized values with the same shape as X.")
.Reuse("X");
AddAttr(
@@ -105,20 +102,23 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
Softmax Operator.
-The input of the softmax operator is a 2-D tensor with shape N x K (N is the
-batch_size, K is the dimension of input feature). The output tensor has the
-same shape as the input tensor.
+The input of the softmax operator is a tensor of any rank. The output tensor
+has the same shape as the input.
-For each row of the input tensor, the softmax operator squashes the
-K-dimensional vector of arbitrary real values to a K-dimensional vector of real
-values in the range [0, 1] that add up to 1.
+The input tensor will first be logically flattened to a 2-D matrix. The matrix's
+second dimension(row length) is as same as the last dimension of the input
+tensor, and the first dimension(column length) is the product of all other
+dimensions of the input tensor. For each row of the matrix, the softmax operator
+squashes the K-dimensional(K is the width of the matrix, which is also the size
+of the input tensor's last dimension) vector of arbitrary real values to a
+K-dimensional vector of real values in the range [0, 1] that add up to 1.
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
Then the ratio of the exponential of the given dimension and the sum of
exponential values of all the other dimensions is the output of the softmax
operator.
-For each row $i$ and each column $j$ in Input(X), we have:
+For each row $i$ and each column $j$ in the matrix, we have:
$$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$
)DOC");
diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h
index 600da45a0bbb69b76d59c981e195fc03a49b0504..1205bd0587f32caae04c27ecea581fc17988507f 100644
--- a/paddle/fluid/operators/softmax_op.h
+++ b/paddle/fluid/operators/softmax_op.h
@@ -31,8 +31,16 @@ class SoftmaxKernel : public framework::OpKernel {
// allocate memory on device.
Out->mutable_data(context.GetPlace());
+ auto dims = X->dims();
+ auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+ framework::LoDTensor flattened_x;
+ framework::LoDTensor flattened_out;
+ flattened_x.ShareDataWith(*X).Resize(flattened_dims);
+ flattened_out.ShareDataWith(*Out).Resize(flattened_dims);
+
math::SoftmaxFunctor()(
- context.template device_context(), X, Out);
+ context.template device_context(), &flattened_x,
+ &flattened_out);
}
};
@@ -47,8 +55,18 @@ class SoftmaxGradKernel : public framework::OpKernel {
// allocate memory on device.
dX->mutable_data(context.GetPlace());
+ auto dims = Out->dims();
+ auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+ framework::LoDTensor flattened_out;
+ framework::LoDTensor flattened_d_out;
+ framework::LoDTensor flattened_d_x;
+ flattened_out.ShareDataWith(*Out).Resize(flattened_dims);
+ flattened_d_out.ShareDataWith(*dOut).Resize(flattened_dims);
+ flattened_d_x.ShareDataWith(*dX).Resize(flattened_dims);
+
math::SoftmaxGradFunctor()(
- context.template device_context(), Out, dOut, dX);
+ context.template device_context(), &flattened_out,
+ &flattened_d_out, &flattened_d_x);
}
};
diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc
index d9e2afadaf8ec439d158e57c94d3e6e684bce116..8fa8dbd67c936439840cffa073b6fa6693dd31a1 100644
--- a/paddle/fluid/platform/device_tracer.cc
+++ b/paddle/fluid/platform/device_tracer.cc
@@ -30,9 +30,6 @@ limitations under the License. */
namespace paddle {
namespace platform {
namespace {
-// Current thread's id. Note, we don't distinguish nested threads
-// for now.
-thread_local int cur_thread_id = 0;
// Tracking the nested block stacks of each thread.
thread_local std::deque block_id_stack;
// Tracking the nested event stacks.
@@ -413,12 +410,5 @@ void SetCurBlock(int block_id) { block_id_stack.push_back(block_id); }
void ClearCurBlock() { block_id_stack.pop_back(); }
int BlockDepth() { return block_id_stack.size(); }
-
-void SetCurThread(int thread_id) { cur_thread_id = thread_id; }
-
-void ClearCurThread() { cur_thread_id = 0; }
-
-int CurThread() { return cur_thread_id; }
-
} // namespace platform
} // namespace paddle
diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h
index 0375c7439c29d4122e8ff6b58734dad4f504b7a2..d2a571f4345b544ad5e74f4629c3967593d6d628 100644
--- a/paddle/fluid/platform/device_tracer.h
+++ b/paddle/fluid/platform/device_tracer.h
@@ -99,9 +99,5 @@ std::string CurAnnotation();
void SetCurBlock(int block_id);
void ClearCurBlock();
int BlockDepth();
-
-void SetCurThread(int thread_id);
-void ClearCurThread();
-int CurThread();
} // namespace platform
} // namespace paddle
diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index 01de9d7041bf3eb40884e2a6295027cccfaebd2a..7c8d8a5964fa5258bebaf2c8522886ae5886ab2c 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -110,6 +110,8 @@ Event::Event(EventType type, std::string name, uint32_t thread_id,
has_cuda_ = dev_ctx ? platform::is_gpu_place(dev_ctx->GetPlace()) : false;
if (has_cuda_) {
auto* cuda_dev_ctx = static_cast(dev_ctx);
+ PADDLE_ENFORCE(cudaSetDevice(
+ boost::get(cuda_dev_ctx->GetPlace()).device));
PADDLE_ENFORCE(cudaGetDevice(&device_));
PADDLE_ENFORCE(cudaEventCreate(&event_));
auto stream = cuda_dev_ctx->stream();
@@ -176,6 +178,7 @@ void PopEvent(const std::string& name, const DeviceContext* dev_ctx) {
RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx)
: is_enabled_(false), start_ns_(PosixInNsec()) {
+ std::lock_guard l(profiler_mu);
if (g_state == ProfilerState::kDisabled) return;
is_enabled_ = true;
dev_ctx_ = dev_ctx;
@@ -186,11 +189,12 @@ RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx)
}
RecordEvent::~RecordEvent() {
+ std::lock_guard l(profiler_mu);
if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
DeviceTracer* tracer = GetDeviceTracer();
if (tracer) {
tracer->AddCPURecords(CurAnnotation(), start_ns_, PosixInNsec(),
- BlockDepth(), CurThread());
+ BlockDepth(), g_thread_id);
}
ClearCurAnnotation();
PopEvent(name_, dev_ctx_);
@@ -198,6 +202,7 @@ RecordEvent::~RecordEvent() {
RecordBlock::RecordBlock(int block_id)
: is_enabled_(false), start_ns_(PosixInNsec()) {
+ std::lock_guard l(profiler_mu);
if (g_state == ProfilerState::kDisabled) return;
is_enabled_ = true;
SetCurBlock(block_id);
@@ -205,27 +210,18 @@ RecordBlock::RecordBlock(int block_id)
}
RecordBlock::~RecordBlock() {
+ std::lock_guard l(profiler_mu);
if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
DeviceTracer* tracer = GetDeviceTracer();
if (tracer) {
// We try to put all blocks at the same nested depth in the
// same timeline lane. and distinguish the using thread_id.
tracer->AddCPURecords(name_, start_ns_, PosixInNsec(), BlockDepth(),
- CurThread());
+ g_thread_id);
}
ClearCurBlock();
}
-RecordThread::RecordThread(int thread_id) {
- if (g_state == ProfilerState::kDisabled) return;
- SetCurThread(thread_id);
-}
-
-RecordThread::~RecordThread() {
- if (g_state == ProfilerState::kDisabled) return;
- ClearCurThread();
-}
-
void EnableProfiler(ProfilerState state) {
PADDLE_ENFORCE(state != ProfilerState::kDisabled,
"Can't enbale profling, since the input state is ",
diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h
index bf43925373a12cd9ff2155d68c42d0266ba4df60..c99d9c807d1bfb45d1ce0725b84b9fff09049511 100644
--- a/paddle/fluid/platform/profiler.h
+++ b/paddle/fluid/platform/profiler.h
@@ -95,11 +95,6 @@ struct RecordBlock {
uint64_t start_ns_;
};
-struct RecordThread {
- explicit RecordThread(int thread_id);
- ~RecordThread();
-};
-
// Return the event list of all threads. Assumed the returned value calls
// event_lists, event_lists[i][j] represents the j-th Event of i-th thread.
std::vector> GetAllEvents();
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 058acd4a50ef54cea724a742d40eaca8f569a21c..12e7170fc3da83071f4a23b6c39463d8c2543391 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1313,13 +1313,16 @@ def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True):
def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None):
"""
- The input of the softmax layer is a 2-D tensor with shape N x K (N is the
- batch_size, K is the dimension of input feature). The output tensor has the
- same shape as the input tensor.
+ The input of the softmax operator is a tensor of any rank. The output tensor
+ has the same shape as the input.
- For each row of the input tensor, the softmax operator squashes the
- K-dimensional vector of arbitrary real values to a K-dimensional vector of real
- values in the range [0, 1] that add up to 1.
+ The input tensor will first be logically flattened to a 2-D matrix. The matrix's
+ second dimension (row length) is the same as the last dimension of the input
+ tensor, and the first dimension (column length) is the product of all other
+ dimensions of the input tensor. For each row of the matrix, the softmax operator
+ squashes the K-dimensional (K is the width of the matrix, which is also the size
+ of the input tensor's last dimension) vector of arbitrary real values to a
+ K-dimensional vector of real values in the range [0, 1] that add up to 1.
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
@@ -1327,7 +1330,7 @@ def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None):
exponential values of all the other dimensions is the output of the softmax
operator.
- For each row :math:`i` and each column :math:`j` in Input(X), we have:
+ For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index c8e881a672ad25654bd28604abfafc2c569af7ca..a6a911721dfa31e5fb8d57645071af42adc968be 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -50,6 +50,8 @@ list(REMOVE_ITEM TEST_OPS test_parallel_executor_crf)
list(REMOVE_ITEM TEST_OPS test_parallel_executor_fetch_feed)
list(REMOVE_ITEM TEST_OPS test_dist_se_resnext)
list(REMOVE_ITEM TEST_OPS test_dist_transformer)
+list(REMOVE_ITEM TEST_OPS test_parallel_executor_transformer)
+list(REMOVE_ITEM TEST_OPS test_image_classification_resnet)
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
@@ -64,3 +66,5 @@ py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SE
py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL)
py_test_modules(test_dist_transformer MODULES test_dist_transformer SERIAL)
py_test_modules(test_dist_se_resnext MODULES test_dist_se_resnext SERIAL)
+py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL)
+py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL)
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index e16ab1d15f165bd0efa1b7d51add36c3020a1910..ad0d555198c36c12fd1cc39c41d39b24b40f64c3 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -35,6 +35,22 @@ class TestLookupTableOp(OpTest):
self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+class TestLookupTableOpWithTensorIds(OpTest):
+ def setUp(self):
+ self.op_type = "lookup_table"
+ table = np.random.random((17, 31)).astype("float32")
+ ids = np.random.randint(
+ low=0, high=17, size=(2, 4, 5, 1)).astype("int64")
+ self.inputs = {'W': table, 'Ids': ids}
+ self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}
+
+ def test_check_output(self):
+ self.check_output()
+
+ def test_check_grad(self):
+ self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+
+
class TestLookupTableOpWithPadding(TestLookupTableOp):
def test_check_output(self):
ids = np.squeeze(self.inputs['Ids'])
@@ -44,21 +60,34 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
self.check_output()
def test_check_grad(self):
- # Since paddings are not trainable and fixed in forward, the gradient of
+ # Since paddings are not trainable and fixed in forward, the gradient of
# paddings makes no sense and we don't test the gradient here.
pass
-class TestLookupTableWIsSelectedRows(OpTest):
- def check_with_place(self, place):
- scope = core.Scope()
+class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
+ def test_check_output(self):
+ ids = self.inputs['Ids']
+ flatten_idx = ids.flatten()
+ padding_idx = np.random.choice(flatten_idx, 1)[0]
+ self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
+ self.attrs = {'padding_idx': long(padding_idx)}
+ self.check_output()
+
+ def test_check_grad(self):
+ # Since paddings are not trainable and fixed in forward, the gradient of
+ # paddings makes no sense and we don't test the gradient here.
+ pass
- # create and initialize Id Variable
+
+class TestLookupTableWIsSelectedRows(OpTest):
+ def prepare_ids(self, scope, place):
ids_tensor = scope.var('Ids').get_tensor()
ids_array = np.array([[0], [4], [3], [5]]).astype("int64")
ids_tensor.set(ids_array, place)
+ return ids_array
- # create and initialize W Variable
+ def prepare_w(self, scope, place):
rows = [0, 1, 2, 3, 4, 5, 6]
row_numel = 12
@@ -71,8 +100,22 @@ class TestLookupTableWIsSelectedRows(OpTest):
w_tensor = w_selected_rows.get_tensor()
w_tensor.set(w_array, place)
- # create Out Variable
- out_tensor = scope.var('Out').get_tensor()
+ def create_out_tensor(self, scope, place):
+ return scope.var('Out').get_tensor()
+
+ def check_result(self, ids_array, result_array):
+ # all(): return True if all elements of the iterable are true (or if the iterable is empty)
+ for idx, row in enumerate(ids_array):
+ assert (row[0] == result_array[idx]).all()
+
+ def check_with_place(self, place):
+ scope = core.Scope()
+
+ ids_array = self.prepare_ids(scope, place)
+
+ self.prepare_w(scope, place)
+
+ out_tensor = self.create_out_tensor(scope, place)
# create and run lookup_table operator
lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out')
@@ -80,9 +123,8 @@ class TestLookupTableWIsSelectedRows(OpTest):
# get result from Out
result_array = np.array(out_tensor)
- # all(): return True if all elements of the iterable are true (or if the iterable is empty)
- for idx, row in enumerate(ids_array):
- assert (row[0] == result_array[idx]).all()
+
+ self.check_result(ids_array, result_array)
def test_w_is_selected_rows(self):
places = [core.CPUPlace()]
@@ -91,5 +133,19 @@ class TestLookupTableWIsSelectedRows(OpTest):
self.check_with_place(place)
+class TestLookupTableWithTensorIdsWIsSelectedRows(
+ TestLookupTableWIsSelectedRows):
+ def prepare_ids(self, scope, place):
+ ids_tensor = scope.var('Ids').get_tensor()
+ ids_array = np.random.randint(
+ low=0, high=6, size=(2, 4, 3, 1)).astype("int64")
+ ids_tensor.set(ids_array, place)
+ return ids_array
+
+ def check_result(self, ids_array, result_array):
+ for idx, row in np.ndenumerate(ids_array):
+ assert (row == result_array[idx]).all()
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 0ab581cfb0ea0ff2205450b8e62edb8bf3c51707..70ad05597c4a160cf6a25aeb3c379320cef69c63 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -26,15 +26,22 @@ def stable_softmax(x):
class TestSoftmaxOp(OpTest):
+ def get_x_shape(self):
+ return [10, 10]
+
def setUp(self):
self.op_type = "softmax"
self.use_cudnn = False
self.use_mkldnn = False
self.dtype = np.float32
self.init_kernel_type()
+ self.shape = self.get_x_shape()
+
+ x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+ out = np.apply_along_axis(stable_softmax, 1,
+ x.reshape([-1, self.shape[-1]]))
+ out = out.reshape(self.shape)
- x = np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype)
- out = np.apply_along_axis(stable_softmax, 1, x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
self.attrs = {
@@ -63,6 +70,11 @@ class TestSoftmaxOp(OpTest):
self.check_grad(["X"], "Out", max_relative_error=0.01)
+class TestSoftmaxOp2(TestSoftmaxOp):
+ def get_x_shape(self):
+ return [2, 3, 4, 5]
+
+
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
@@ -70,6 +82,13 @@ class TestSoftmaxCUDNNOp(TestSoftmaxOp):
self.use_cudnn = True
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+ "core is not compiled with CUDA")
+class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
+ def get_x_shape(self):
+ return [2, 3, 4, 5]
+
+
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
@@ -83,6 +102,13 @@ class TestSoftmaxFP16Op(TestSoftmaxOp):
self.check_output_with_place(place, atol=1e-3)
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+ "core is not compiled with CUDA")
+class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
+ def get_x_shape(self):
+ return [2, 3, 4, 5]
+
+
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
@@ -97,10 +123,22 @@ class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
self.check_output_with_place(place, atol=1e-3)
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+ "core is not compiled with CUDA")
+class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
+ def get_x_shape(self):
+ return [2, 3, 4, 5]
+
+
class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
def init_kernel_type(self):
self.use_mkldnn = True
+class TestSoftmaxMKLDNNOp2(TestSoftmaxMKLDNNOp):
+ def get_x_shape(self):
+ return [2, 3, 4, 5]
+
+
if __name__ == "__main__":
unittest.main()