Commit 48324c32 : PaddlePaddle / Paddle
Authored on Dec 29, 2018 by sneaxiy

    merge develop
    test=develop

Parents: 8a83d699, 10bedbde
Showing 44 changed files with 1,416 additions and 485 deletions.
cmake/cuda.cmake                                                       +3    -0
cmake/cudnn.cmake                                                      +1    -0
cmake/external/cub.cmake                                               +1    -1
cmake/external/dlpack.cmake                                            +1    -1
cmake/operators.cmake                                                  +1    -1
paddle/fluid/framework/CMakeLists.txt                                  +4    -4
paddle/fluid/framework/async_executor.cc                               +7    -2
paddle/fluid/framework/details/all_reduce_op_handle.cc                 +1    -1
paddle/fluid/framework/details/execution_strategy.h                    +1    -1
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc    +13   -7
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h     +8    -0
paddle/fluid/framework/executor_thread_worker.cc                       +53   -0
paddle/fluid/framework/executor_thread_worker.h                        +2    -0
paddle/fluid/framework/rw_lock.h                                       +35   -68
paddle/fluid/framework/scope.cc                                        +31   -20
paddle/fluid/framework/scope.h                                         +17   -3
paddle/fluid/operators/conv_cudnn_op_cache.h                           +34   -0
paddle/fluid/operators/conv_fusion_op.cc                               +61   -1
paddle/fluid/operators/conv_fusion_op.cu.cc                            +72   -31
paddle/fluid/operators/distributed/collective_server_test.cc           +3    -2
paddle/fluid/operators/fused/CMakeLists.txt                            +3    -1
paddle/fluid/operators/fused/fusion_conv_inception_op.cc               +110  -0
paddle/fluid/operators/fused/fusion_conv_inception_op.cu               +272  -0
paddle/fluid/platform/CMakeLists.txt                                   +3    -0
paddle/fluid/platform/dynload/cudnn.cc                                 +4    -0
paddle/fluid/platform/dynload/dynamic_loader.cc                        +12   -0
paddle/fluid/platform/timer.cc                                         +63   -0
paddle/fluid/platform/timer.h                                          +61   -0
paddle/fluid/platform/timer_test.cc                                    +45   -0
paddle/fluid/pybind/pybind.cc                                          +7    -3
paddle/testing/paddle_gtest_main.cc                                    +43   -10
python/paddle/fluid/__init__.py                                        +12   -3
python/paddle/fluid/data_feeder.py                                     +1    -2
python/paddle/fluid/framework.py                                       +10   -14
python/paddle/fluid/layers/control_flow.py                             +5    -4
python/paddle/fluid/layers/detection.py                                +62   -58
python/paddle/fluid/layers/io.py                                       +5    -6
python/paddle/fluid/layers/nn.py                                       +268  -213
python/paddle/fluid/layers/tensor.py                                   +8    -3
python/paddle/fluid/metrics.py                                         +14   -8
python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py           +33   -8
python/paddle/fluid/tests/unittests/test_layers.py                     +8    -0
python/paddle/fluid/tests/unittests/testsuite.py                       +2    -2
python/paddle/fluid/transpiler/distribute_transpiler.py                +16   -7
cmake/cuda.cmake
@@ -139,10 +139,12 @@ endfunction()
 message(STATUS "CUDA detected: " ${CUDA_VERSION})
 if (${CUDA_VERSION} LESS 7.0)
   set(paddle_known_gpu_archs ${paddle_known_gpu_archs})
+  add_definitions("-DPADDLE_CUDA_BINVER=\"60\"")
 elseif (${CUDA_VERSION} LESS 8.0) # CUDA 7.x
   set(paddle_known_gpu_archs ${paddle_known_gpu_archs7})
   list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
   list(APPEND CUDA_NVCC_FLAGS "-D__STRICT_ANSI__")
+  add_definitions("-DPADDLE_CUDA_BINVER=\"70\"")
 elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
   set(paddle_known_gpu_archs ${paddle_known_gpu_archs8})
   list(APPEND CUDA_NVCC_FLAGS "-D_MWAITXINTRIN_H_INCLUDED")
@@ -150,6 +152,7 @@ elseif (${CUDA_VERSION} LESS 9.0) # CUDA 8.x
   # CUDA 8 may complain that sm_20 is no longer supported. Suppress the
   # warning for now.
   list(APPEND CUDA_NVCC_FLAGS "-Wno-deprecated-gpu-targets")
+  add_definitions("-DPADDLE_CUDA_BINVER=\"80\"")
 endif()

 include_directories(${CUDA_INCLUDE_DIRS})
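The add_definitions lines above bake the CUDA binary version into every translation unit as a quoted string macro. A minimal standalone sketch of how such a stringified macro can be consumed at runtime; the fallback define exists only so the sketch compiles on its own and is not part of the commit:

#include <cstdio>

// Fallback for standalone compilation; the real value comes from CMake's
// add_definitions("-DPADDLE_CUDA_BINVER=\"80\"").
#ifndef PADDLE_CUDA_BINVER
#define PADDLE_CUDA_BINVER "80"
#endif

int main() {
  // Report the CUDA binary version the binary was built against, without
  // needing to link against CUDA at all.
  std::printf("built against CUDA binary version: %s\n", PADDLE_CUDA_BINVER);
  return 0;
}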
cmake/cudnn.cmake
@@ -89,6 +89,7 @@ if(CUDNN_FOUND)
   if(NOT CUDNN_MAJOR_VERSION)
     set(CUDNN_VERSION "???")
   else()
+    add_definitions("-DPADDLE_CUDNN_BINVER=\"${CUDNN_MAJOR_VERSION}\"")
     math(EXPR CUDNN_VERSION
       "${CUDNN_MAJOR_VERSION} * 1000 +
        ${CUDNN_MINOR_VERSION} * 100 + ${CUDNN_PATCHLEVEL_VERSION}")
cmake/external/cub.cmake
@@ -32,4 +32,4 @@ endif()
 add_dependencies(cub extern_cub)

-LIST(APPEND externl_project_dependencies cub)
+LIST(APPEND external_project_dependencies cub)
cmake/external/dlpack.cmake
@@ -28,4 +28,4 @@ endif()
 add_dependencies(dlpack extern_dlpack)

-LIST(APPEND externl_project_dependencies dlpack)
+LIST(APPEND external_project_dependencies dlpack)
cmake/operators.cmake
@@ -110,7 +110,7 @@ function(op_library TARGET)
   # Define operators that don't need pybind here.
   foreach(manual_pybind_op "compare_op" "logical_op" "nccl_op"
 "tensor_array_read_write_op" "tensorrt_engine_op" "conv_fusion_op"
-"fusion_transpose_flatten_concat_op")
+"fusion_transpose_flatten_concat_op" "fusion_conv_inception_op")
     if ("${TARGET}" STREQUAL "${manual_pybind_op}")
       set(pybind_flag 1)
     endif()
paddle/fluid/framework/CMakeLists.txt
@@ -72,13 +72,13 @@ cc_test(reader_test SRCS reader_test.cc DEPS reader)
 cc_library(threadpool SRCS threadpool.cc DEPS enforce)
 cc_test(threadpool_test SRCS threadpool_test.cc DEPS threadpool)
 cc_library(var_type_traits SRCS var_type_traits DEPS lod_tensor selected_rows framework_proto)
 if (WITH_GPU)
   target_link_libraries(var_type_traits dynload_cuda)
 endif()
 cc_test(var_type_traits_test SRCS var_type_traits_test.cc DEPS var_type_traits)

-cc_library(scope SRCS scope.cc DEPS glog threadpool var_type_traits)
+cc_library(scope SRCS scope.cc DEPS glog threadpool xxhash var_type_traits)
 cc_library(scope_pool SRCS scope_pool.cc DEPS scope)
 cc_test(scope_test SRCS scope_test.cc DEPS scope)
 cc_test(variable_test SRCS variable_test.cc DEPS tensor var_type_traits)
@@ -189,9 +189,9 @@ cc_library(parallel_executor SRCS parallel_executor.cc DEPS
         fast_threaded_ssa_graph_executor variable_helper)

 if(WITH_PSLIB)
-    cc_library(async_executor SRCS async_executor.cc data_feed.cc data_feed_factory.cc executor_thread_worker.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass async_executor_proto variable_helper pslib_brpc pslib)
+    cc_library(async_executor SRCS async_executor.cc data_feed.cc data_feed_factory.cc executor_thread_worker.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass async_executor_proto variable_helper pslib_brpc pslib timer)
 else()
-    cc_library(async_executor SRCS async_executor.cc data_feed.cc data_feed_factory.cc executor_thread_worker.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass async_executor_proto variable_helper)
+    cc_library(async_executor SRCS async_executor.cc data_feed.cc data_feed_factory.cc executor_thread_worker.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method graph_to_program_pass async_executor_proto variable_helper timer)
 endif(WITH_PSLIB)
paddle/fluid/framework/async_executor.cc
@@ -304,8 +304,13 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
   // start executing ops in multiple threads
   for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
-    threads.push_back(
-        std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
+    if (debug) {
+      threads.push_back(std::thread(&ExecutorThreadWorker::TrainFilesWithTimer,
+                                    workers[thidx].get()));
+    } else {
+      threads.push_back(
+          std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
+    }
   }

   for (auto& th : threads) {
paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -50,7 +50,7 @@ void AllReduceOpHandle::RunImpl() {
   // FIXME(typhoonzero): If scope0(global scope) have NCCL_ID_VAR,
   // this is a distributed or inter-process call, find a better way.
-#ifdef PADDLE_WITH_CUDA
+#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
   if (NoDummyInputSize() == 1 &&
       local_scopes_[0]->FindLocalVar(NCCL_ID_VARNAME) == nullptr) {
 #else
paddle/fluid/framework/details/execution_strategy.h
@@ -25,7 +25,7 @@ struct ExecutionStrategy {
   size_t num_threads_{0};
   bool use_cuda_{true};
   bool allow_op_delay_{false};
-  size_t num_iteration_per_drop_scope_{100};
+  size_t num_iteration_per_drop_scope_{1};
   ExecutorType type_{kDefault};
   bool dry_run_{false};
 };
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
@@ -64,20 +64,26 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
   }
   platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun", nullptr);
-  drop_scope_counter_ += 1;
+  ++drop_scope_counter_;

-  if (!fetch_tensors.empty() ||
-      drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) {
-    drop_scope_counter_ = 0;
-    // Wait All computational streams
-    for (auto p : places_) {
-      platform::DeviceContextPool::Instance().Get(p)->Wait();
+  bool stream_end = false;
+  if (!fetch_tensors.empty()) {
+    WaitComputationalStreams();
+    stream_end = true;
+  }
+
+  if (drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) {
+    if (!stream_end) {
+      WaitComputationalStreams();
     }
+
     for (auto& scope : local_scopes_) {
       auto& local_scope =
           *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope*>();
       scope->DeleteScope(local_scope);
     }
+
+    drop_scope_counter_ = 0;
   }
+
   if (eptr) {
     std::rethrow_exception(eptr);
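The rewritten Run logic syncs the device streams at most once per call: immediately when tensors are fetched, and otherwise only when the drop-scope counter fires. A standalone sketch of that control flow with illustrative names, not Paddle's API:

#include <cstdio>

// Sketch: periodic cleanup driven by a counter, with a flag to avoid a
// second, redundant stream synchronization in the same iteration.
struct ExecutorSketch {
  size_t drop_scope_counter_ = 0;
  size_t num_iteration_per_drop_scope_ = 1;

  void WaitStreams() { std::printf("wait computational streams\n"); }
  void DropLocalScopes() { std::printf("drop local scopes\n"); }

  void Run(bool has_fetch) {
    ++drop_scope_counter_;
    bool stream_end = false;
    if (has_fetch) {  // fetched tensors must be ready for the caller
      WaitStreams();
      stream_end = true;
    }
    if (drop_scope_counter_ == num_iteration_per_drop_scope_) {
      if (!stream_end) WaitStreams();  // only sync if we have not already
      DropLocalScopes();
      drop_scope_counter_ = 0;
    }
  }
};

int main() {
  ExecutorSketch e;
  e.Run(true);   // fetch: sync once, then drop (counter fires at 1)
  e.Run(false);  // no fetch: sync happens inside the drop branch
  return 0;
}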
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h
@@ -47,6 +47,14 @@ class ScopeBufferedSSAGraphExecutor : public SSAGraphExecutor {

   FeedFetchList Run(const std::vector<std::string>& fetch_tensors) override;

+ private:
+  inline void WaitComputationalStreams() {
+    // Wait All computational streams
+    for (auto p : places_) {
+      platform::DeviceContextPool::Instance().Get(p)->Wait();
+    }
+  }
+
  private:
   size_t drop_scope_counter_{0};
paddle/fluid/framework/executor_thread_worker.cc
@@ -29,6 +29,7 @@ limitations under the License. */
 #include "paddle/fluid/inference/io.h"
 #include "paddle/fluid/platform/cpu_helper.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/fluid/platform/timer.h"
 #include "paddle/fluid/pybind/pybind.h"
 namespace paddle {
 namespace framework {
@@ -180,6 +181,7 @@ void ExecutorThreadWorker::SetDevice() {
   return;
 #else
   static unsigned concurrency_cap = std::thread::hardware_concurrency();
+  LOG(WARNING) << "concurrency capacity " << concurrency_cap;
   int thread_id = this->thread_id_;

   if (static_cast<unsigned>(thread_id) < concurrency_cap) {
@@ -238,6 +240,55 @@ static void print_fetch_var(Scope* scope, const std::string& var_name) {
   VLOG(1) << "print_fetch_var: unrecognized data type:" << tensor.type();
 }

+void ExecutorThreadWorker::TrainFilesWithTimer() {
+  platform::SetNumThreads(1);
+  SetDevice();
+  thread_reader_->Start();
+  std::vector<double> op_total_time;
+  std::vector<std::string> op_name;
+  for (auto& op : ops_) {
+    op_name.push_back(op->Type());
+  }
+  op_total_time.resize(ops_.size());
+  for (size_t i = 0; i < op_total_time.size(); ++i) {
+    op_total_time[i] = 0.0;
+  }
+  platform::Timer timeline;
+  double total_time = 0.0;
+  double read_time = 0.0;
+  int cur_batch;
+  int batch_cnt = 0;
+  timeline.Start();
+  while ((cur_batch = thread_reader_->Next()) > 0) {
+    timeline.Pause();
+    read_time += timeline.ElapsedSec();
+    total_time += timeline.ElapsedSec();
+    for (size_t i = 0; i < ops_.size(); ++i) {
+      timeline.Start();
+      ops_[i]->Run(*thread_scope_, place_);
+      timeline.Pause();
+      op_total_time[i] += timeline.ElapsedSec();
+      total_time += timeline.ElapsedSec();
+    }
+    ++batch_cnt;
+    thread_scope_->DropKids();
+    if (thread_id_ == 0) {
+      if (batch_cnt > 0 && batch_cnt % 1000 == 0) {
+        for (size_t i = 0; i < ops_.size(); ++i) {
+          fprintf(stderr, "op_name:[%zu][%s], op_mean_time:[%fs]\n", i,
+                  op_name[i].c_str(), op_total_time[i] / batch_cnt);
+        }
+        fprintf(stderr, "mean read time: %fs\n", read_time / batch_cnt);
+        int fetch_var_num = fetch_var_names_.size();
+        for (int i = 0; i < fetch_var_num; ++i) {
+          print_fetch_var(thread_scope_, fetch_var_names_[i]);
+        }
+      }
+    }
+    timeline.Start();
+  }
+}
+
 void ExecutorThreadWorker::TrainFiles() {
   platform::SetNumThreads(1);
@@ -320,10 +371,12 @@ void AsyncExecutorThreadWorker::SetPSlibPtr(
     std::shared_ptr<paddle::distributed::PSlib> pslib_ptr) {
   _pslib_ptr = pslib_ptr;
 }
+
 void AsyncExecutorThreadWorker::SetPullDenseThread(
     std::shared_ptr<DensePullThread> dpt) {
   _pull_dense_thread = dpt;
 }
+
 void AsyncExecutorThreadWorker::TrainOneNetwork() {
   PrepareParams();
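TrainFilesWithTimer accumulates per-op wall time with repeated Start/Pause calls and reports mean times every 1000 batches on thread 0. A self-contained sketch of the same accumulation pattern, with std::chrono standing in for the platform::Timer class this commit adds under paddle/fluid/platform/timer.h:

#include <chrono>
#include <cstdio>
#include <vector>

// Minimal Start/Pause timer mimicking the usage pattern above.
class Timer {
 public:
  void Start() { begin_ = std::chrono::steady_clock::now(); }
  void Pause() {
    elapsed_ = std::chrono::duration<double>(
                   std::chrono::steady_clock::now() - begin_)
                   .count();
  }
  double ElapsedSec() const { return elapsed_; }

 private:
  std::chrono::steady_clock::time_point begin_;
  double elapsed_ = 0.0;
};

int main() {
  const int num_ops = 3;
  std::vector<double> op_total_time(num_ops, 0.0);
  Timer timeline;
  int batch_cnt = 0;
  for (int batch = 0; batch < 2000; ++batch) {
    for (int i = 0; i < num_ops; ++i) {
      timeline.Start();
      // ops_[i]->Run(...) would go here in the real worker.
      timeline.Pause();
      op_total_time[i] += timeline.ElapsedSec();  // accumulate per op
    }
    ++batch_cnt;
    if (batch_cnt % 1000 == 0) {  // periodic mean-time report
      for (int i = 0; i < num_ops; ++i) {
        std::fprintf(stderr, "op:[%d], op_mean_time:[%fs]\n", i,
                     op_total_time[i] / batch_cnt);
      }
    }
  }
  return 0;
}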
paddle/fluid/framework/executor_thread_worker.h
@@ -155,6 +155,8 @@ class ExecutorThreadWorker {
   void SetDataFeed(const std::shared_ptr<DataFeed>& datafeed);
   // A multi-thread training function
   virtual void TrainFiles();
+  // with timer log
+  virtual void TrainFilesWithTimer();
   // set fetch variable names from python interface assigned by users
   void SetFetchVarNames(const std::vector<std::string>& fetch_var_names);
 #ifdef PADDLE_WITH_PSLIB
paddle/fluid/framework/rw_lock.h
@@ -16,7 +16,9 @@ limitations under the License. */
 #if !defined(_WIN32)
 #include <pthread.h>
-#endif  // !_WIN32
+#else
+#include <mutex>  // NOLINT
+#endif  // !_WIN32

 #include "paddle/fluid/platform/enforce.h"
@@ -29,17 +31,17 @@ struct RWLock {
   ~RWLock() { pthread_rwlock_destroy(&lock_); }

-  void RDLock() {
+  inline void RDLock() {
     PADDLE_ENFORCE_EQ(pthread_rwlock_rdlock(&lock_), 0,
                       "acquire read lock failed");
   }

-  void WRLock() {
+  inline void WRLock() {
     PADDLE_ENFORCE_EQ(pthread_rwlock_wrlock(&lock_), 0,
                       "acquire write lock failed");
   }

-  void UNLock() {
+  inline void UNLock() {
     PADDLE_ENFORCE_EQ(pthread_rwlock_unlock(&lock_), 0, "unlock failed");
   }
@@ -51,81 +53,46 @@ struct RWLock {
 // https://stackoverflow.com/questions/7125250/making-pthread-rwlock-wrlock-recursive
 // In windows, rw_lock seems like a hack. Use empty object and do nothing.
 struct RWLock {
-  void RDLock() {}
-  void WRLock() {}
-  void UNLock() {}
+  // FIXME(minqiyang): use mutex here to do fake lock
+  inline void RDLock() { mutex_.lock(); }
+
+  inline void WRLock() { mutex_.lock(); }
+
+  inline void UNLock() { mutex_.unlock(); }
+
+ private:
+  std::mutex mutex_;
 };
 #endif

-class RWLockGuard {
+class AutoWRLock {
  public:
-  enum Status { kUnLock, kWRLock, kRDLock };
-
-  RWLockGuard(RWLock* rw_lock, Status init_status)
-      : lock_(rw_lock), status_(Status::kUnLock) {
-    switch (init_status) {
-      case Status::kRDLock: {
-        RDLock();
-        break;
-      }
-      case Status::kWRLock: {
-        WRLock();
-        break;
-      }
-      case Status::kUnLock: {
-        break;
-      }
-    }
-  }
+  explicit AutoWRLock(RWLock* rw_lock) : lock_(rw_lock) { Lock(); }
+
+  ~AutoWRLock() { UnLock(); }

-  void WRLock() {
-    switch (status_) {
-      case Status::kUnLock: {
-        lock_->WRLock();
-        status_ = Status::kWRLock;
-        break;
-      }
-      case Status::kWRLock: {
-        break;
-      }
-      case Status::kRDLock: {
-        PADDLE_THROW(
-            "Please unlock read lock first before invoking write lock.");
-        break;
-      }
-    }
-  }
+ private:
+  inline void Lock() { lock_->WRLock(); }

-  void RDLock() {
-    switch (status_) {
-      case Status::kUnLock: {
-        lock_->RDLock();
-        status_ = Status::kRDLock;
-        break;
-      }
-      case Status::kRDLock: {
-        break;
-      }
-      case Status::kWRLock: {
-        PADDLE_THROW(
-            "Please unlock write lock first before invoking read lock.");
-        break;
-      }
-    }
-  }
+  inline void UnLock() { lock_->UNLock(); }

-  void UnLock() {
-    if (status_ != Status::kUnLock) {
-      lock_->UNLock();
-      status_ = Status::kUnLock;
-    }
-  }
+ private:
+  RWLock* lock_;
+};

-  ~RWLockGuard() { UnLock(); }
+class AutoRDLock {
+ public:
+  explicit AutoRDLock(RWLock* rw_lock) : lock_(rw_lock) { Lock(); }
+
+  ~AutoRDLock() { UnLock(); }
+
+ private:
+  inline void Lock() { lock_->RDLock(); }
+
+  inline void UnLock() { lock_->UNLock(); }

  private:
   RWLock* lock_;
-  Status status_;
 };

 }  // namespace framework
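The new AutoWRLock/AutoRDLock guards replace the Status-tracking RWLockGuard with plain RAII: the constructor takes the lock and the destructor releases it, so a lock can never leak past its scope. A standalone analogue over std::shared_mutex rather than Paddle's RWLock, just to show the shape:

#include <iostream>
#include <shared_mutex>

// RAII reader guard: shared lock held for the guard's lifetime.
class AutoRDLock {
 public:
  explicit AutoRDLock(std::shared_mutex* m) : m_(m) { m_->lock_shared(); }
  ~AutoRDLock() { m_->unlock_shared(); }

 private:
  std::shared_mutex* m_;
};

// RAII writer guard: exclusive lock held for the guard's lifetime.
class AutoWRLock {
 public:
  explicit AutoWRLock(std::shared_mutex* m) : m_(m) { m_->lock(); }
  ~AutoWRLock() { m_->unlock(); }

 private:
  std::shared_mutex* m_;
};

int main() {
  std::shared_mutex mu;
  {
    AutoWRLock guard(&mu);  // exclusive while writing
    std::cout << "writing\n";
  }  // released here, even if the body threw
  {
    AutoRDLock guard(&mu);  // shared while reading
    std::cout << "reading\n";
  }
  return 0;
}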
paddle/fluid/framework/scope.cc
@@ -47,9 +47,15 @@ DEFINE_bool(fast_eager_deletion_mode, false,
 // the mutex will cause serious performance issue.
 // So the mutex is disabled when `ON_INFER`.
 #ifdef PADDLE_ON_INFERENCE
-#define SCOPE_LOCK_GUARD
+#define SCOPE_KIDS_READER_LOCK
+#define SCOPE_KIDS_WRITER_LOCK
+#define SCOPE_VARS_READER_LOCK
+#define SCOPE_VARS_WRITER_LOCK
 #else
-#define SCOPE_LOCK_GUARD std::lock_guard<std::mutex> lock(mutex_);
+#define SCOPE_KIDS_READER_LOCK AutoRDLock auto_lock(&kids_lock_);
+#define SCOPE_KIDS_WRITER_LOCK AutoWRLock auto_lock(&kids_lock_);
+#define SCOPE_VARS_READER_LOCK AutoRDLock auto_lock(&vars_lock_);
+#define SCOPE_VARS_WRITER_LOCK AutoWRLock auto_lock(&vars_lock_);
 #endif

 namespace paddle {
@@ -67,64 +73,69 @@ bool IsFastEagerDeletionModeEnabled() { return FLAGS_fast_eager_deletion_mode; }
 Scope::~Scope() { DropKids(); }

 Scope& Scope::NewScope() const {
-  SCOPE_LOCK_GUARD
-  kids_.push_back(new Scope(this));
-  return *kids_.back();
+  Scope* child = new Scope(this);
+  {
+    SCOPE_KIDS_WRITER_LOCK
+    kids_.push_back(child);
+  }
+  return *child;
 }

 Variable* Scope::Var(const std::string& name) {
-  SCOPE_LOCK_GUARD
+  SCOPE_VARS_WRITER_LOCK
   return VarInternal(name);
 }

 Variable* Scope::Var(std::string* name) {
-  SCOPE_LOCK_GUARD
   auto new_name = string::Sprintf("%p.%d", this, vars_.size());
   if (name != nullptr) {
     *name = new_name;
   }
+  SCOPE_VARS_WRITER_LOCK
   return VarInternal(new_name);
 }

 Variable* Scope::FindVar(const std::string& name) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_VARS_READER_LOCK
   return FindVarInternal(name);
 }

 Variable* Scope::FindLocalVar(const std::string& name) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_VARS_READER_LOCK
   return FindVarLocally(name);
 }

 const Scope* Scope::FindScope(const Variable* var) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_VARS_READER_LOCK
   return FindScopeInternal(var);
 }

 void Scope::DropKids() {
-  SCOPE_LOCK_GUARD
+  SCOPE_KIDS_WRITER_LOCK
   for (Scope* s : kids_) delete s;
   kids_.clear();
 }

 bool Scope::HasKid(const Scope* scope) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_KIDS_READER_LOCK
   auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
   return it != this->kids_.end();
 }

 std::vector<std::string> Scope::LocalVarNames() const {
-  SCOPE_LOCK_GUARD
   std::vector<std::string> known_vars;
-  known_vars.reserve(this->vars_.size());
-  for (auto& p : vars_) {
-    known_vars.emplace_back(p.first);
+  {
+    SCOPE_VARS_READER_LOCK
+    known_vars.reserve(this->vars_.size());
+    for (auto& p : vars_) {
+      known_vars.emplace_back(p.first);
+    }
   }
   return known_vars;
 }

 void Scope::DeleteScope(Scope* scope) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_KIDS_WRITER_LOCK
   auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
   PADDLE_ENFORCE(it != this->kids_.end(), "%p Cannot find %p as kid scope",
                  this, scope);
@@ -138,8 +149,8 @@ void Scope::DeleteScope(Scope* scope) const {
 }

 void Scope::EraseVars(const std::vector<std::string>& var_names) {
-  SCOPE_LOCK_GUARD
   std::set<std::string> var_set(var_names.begin(), var_names.end());
+  SCOPE_VARS_WRITER_LOCK
   for (auto it = vars_.begin(); it != vars_.end();) {
     if (var_set.find(it->first) != var_set.end()) {
       it = vars_.erase(it);
@@ -151,12 +162,12 @@ void Scope::EraseVars(const std::vector<std::string>& var_names) {
 void Scope::Rename(const std::string& origin_name,
                    const std::string& new_name) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_VARS_WRITER_LOCK
   RenameInternal(origin_name, new_name);
 }

 std::string Scope::Rename(const std::string& origin_name) const {
-  SCOPE_LOCK_GUARD
+  SCOPE_VARS_WRITER_LOCK
   auto new_name = string::Sprintf("%p.%d", this, vars_.size());
   RenameInternal(origin_name, new_name);
   return new_name;
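Note how NewScope() now allocates the child outside the critical section and holds the writer lock only for the push_back, and how the single per-scope mutex is split into separate kids_ and vars_ locks. A minimal sketch of that narrow-critical-section pattern, with illustrative names and std::mutex in place of Paddle's RWLock:

#include <memory>
#include <mutex>
#include <vector>

// Sketch: do the expensive work (allocation) outside the lock, and guard
// only the container mutation itself.
struct Node {
  explicit Node(const Node* parent) : parent_(parent) {}
  const Node* parent_;
};

class Tree {
 public:
  ~Tree() {
    for (Node* k : kids_) delete k;
  }

  Node& NewChild() {
    Node* child = new Node(&root_);  // allocated outside the critical section
    {
      std::lock_guard<std::mutex> guard(kids_lock_);  // narrow critical section
      kids_.push_back(child);
    }
    return *child;  // safe to return without the lock: kids_ owns it now
  }

 private:
  Node root_{nullptr};
  std::vector<Node*> kids_;
  std::mutex kids_lock_;  // guards kids_ only; other state can use other locks
};

int main() {
  Tree t;
  t.NewChild();
  return 0;
}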
paddle/fluid/framework/scope.h
@@ -14,12 +14,18 @@ limitations under the License. */
 #pragma once

+extern "C" {
+#include <xxhash.h>
+}
 #include <list>
-#include <mutex>  // NOLINT
+#include <memory>
 #include <string>
 #include <unordered_map>
+#include <utility>
 #include <vector>

+#include "paddle/fluid/framework/rw_lock.h"
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/fluid/platform/macros.h"
@@ -95,7 +101,14 @@ class Scope {
   std::string Rename(const std::string& origin_name) const;

  protected:
-  mutable std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
+  struct KeyHasher {
+    std::size_t operator()(const std::string& key) const {
+      return XXH32(key.c_str(), key.size(), 1);
+    }
+  };
+
+  mutable std::unordered_map<std::string, std::unique_ptr<Variable>, KeyHasher>
+      vars_;

  private:
   // Call Scope::NewScope for a sub-scope.
@@ -124,7 +137,8 @@ class Scope {
   DISABLE_COPY_AND_ASSIGN(Scope);

  private:
-  mutable std::mutex mutex_;
+  mutable RWLock kids_lock_;
+  mutable RWLock vars_lock_;
 };

 // Generate some debug string about the inherience structure of scope, quite
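KeyHasher shows the standard way to swap the hash function of an unordered_map: pass a functor type as the third template argument. The sketch below uses a hand-rolled FNV-1a where Paddle plugs in XXH32, so it compiles with no external dependency; the plumbing is identical:

#include <cstddef>
#include <string>
#include <unordered_map>

// Custom hash functor; any callable taking the key and returning std::size_t
// works as unordered_map's Hash parameter.
struct KeyHasher {
  std::size_t operator()(const std::string& key) const {
    std::size_t h = 1469598103934665603ull;  // FNV-1a offset basis
    for (unsigned char c : key) {
      h ^= c;
      h *= 1099511628211ull;  // FNV-1a prime
    }
    return h;
  }
};

int main() {
  // Third template argument replaces std::hash<std::string>.
  std::unordered_map<std::string, int, KeyHasher> vars;
  vars["w"] = 1;
  return vars.count("w") == 1 ? 0 : 1;
}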
paddle/fluid/operators/conv_cudnn_op_cache.h
@@ -19,6 +19,10 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/platform/cudnn_helper.h"

+DECLARE_uint64(conv_workspace_size_limit);
+DECLARE_bool(cudnn_exhaustive_search);
+DECLARE_int64(cudnn_exhaustive_search_times);
+
 namespace paddle {
 namespace operators {
@@ -45,6 +49,7 @@ static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = 5;
 template <typename TAlgorithm>
 class AlgorithmsCache {
  public:
+  AlgorithmsCache() : search_times_(0) { hash_.clear(); }
   // Caches the best algorithm for a given
   // combination of tensor dimensions & compute data type.
   TAlgorithm GetAlgorithm(
@@ -54,9 +59,14 @@ class AlgorithmsCache {
       int algorithmFlags,  // can set for different data type
       std::function<TAlgorithm()> gen_func);

+  TAlgorithm GetAlgorithm(int64_t area, int search_times, int algorithmFlags,
+                          std::function<TAlgorithm()> gen_func);
+
  private:
   std::unordered_map<int64_t, TAlgorithm> hash_;
   std::mutex mutex_;
+
+  int search_times_;
 };

 template <typename TAlgorithm>
@@ -107,5 +117,29 @@ TAlgorithm AlgorithmsCache<TAlgorithm>::GetAlgorithm(
   return hash_[seed];
 }

+template <typename TAlgorithm>
+TAlgorithm AlgorithmsCache<TAlgorithm>::GetAlgorithm(
+    int64_t area, int search_times, int algorithmFlags,
+    std::function<TAlgorithm()> gen_func) {
+  if (hash_.find(area) != hash_.end()) {
+    return hash_[area];
+  }
+  if (search_times_ < search_times) {
+    auto algo = gen_func();
+    hash_[area] = algo;
+    ++search_times_;
+    return algo;
+  }
+  TAlgorithm algo;
+  int64_t min = static_cast<uint64_t>(INT_MAX);
+  for (const auto& m : hash_) {
+    if (m.first < min) {
+      min = m.first;
+      algo = m.second;
+    }
+  }
+  return algo;
+}
+
 }  // namespace operators
 }  // namespace paddle
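The new area-keyed GetAlgorithm overload searches at most search_times distinct input areas and afterwards reuses a previously cached algorithm; as the min-scan above shows, the fallback picks the entry with the smallest cached area key. A standalone re-sketch of that policy, not the Paddle class itself:

#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>

// Cache keyed by input area: search (via gen_func) for the first
// `search_times` distinct areas, then fall back to the smallest cached key.
template <typename TAlgorithm>
class AreaCacheSketch {
 public:
  TAlgorithm Get(int64_t area, int search_times,
                 std::function<TAlgorithm()> gen_func) {
    auto it = cache_.find(area);
    if (it != cache_.end()) return it->second;  // exact-area hit
    if (searched_ < search_times) {
      TAlgorithm algo = gen_func();  // expensive exhaustive search
      cache_[area] = algo;
      ++searched_;
      return algo;
    }
    // Search budget exhausted: reuse the entry with the smallest area key.
    TAlgorithm algo{};
    int64_t min_area = INT64_MAX;
    for (const auto& kv : cache_) {
      if (kv.first < min_area) {
        min_area = kv.first;
        algo = kv.second;
      }
    }
    return algo;
  }

 private:
  std::unordered_map<int64_t, TAlgorithm> cache_;
  int searched_ = 0;
};

int main() {
  AreaCacheSketch<int> cache;
  std::cout << cache.Get(32 * 32, /*search_times=*/1, [] { return 7; }) << "\n";
  std::cout << cache.Get(64 * 64, 1, [] { return 9; }) << "\n";  // falls back to 7
  return 0;
}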
paddle/fluid/operators/conv_fusion_op.cc
@@ -28,6 +28,8 @@ namespace operators {
 // x is Input,
 // z is ResidualData,
 // bias is Bias
+// When `split_channels` is set, y will be splitted into multiple outputs,
+// each output has split_channels[i] number of channels.
 class Conv2DFusionOpMaker : public Conv2DOpMaker {
  protected:
   void Apply() override {
@@ -36,8 +38,65 @@ class Conv2DFusionOpMaker : public Conv2DOpMaker {
         "The activation type can be 'identity', 'sigmoid', 'relu', 'relu6' "
         "'relux' , 'tanh', 'band_pass'")
         .SetDefault("relu");
+    AddAttr<std::vector<int>>(
+        "split_channels",
+        "When `split_channels` are set, there will be multiple outputs, the "
+        "output size is equal to the number of `split_channels`.")
+        .SetDefault({});
+    AddOutput("Outputs",
+              "This Outputs is used when setting `split_channels`."
+              "Usually used to fuse conv with same input and same filter size, "
+              "padding, stride, dilation size.")
+        .AsDuplicable()
+        .AsDispensable();
+    AddInput("AlgoCache",
+             "The cache of convolution algorithm, a RAW type variable.")
+        .AsDispensable();
+    AddAttr<int>(
+        "search_times",
+        "The number of exhaustive search times for convolution algorithm.")
+        .SetDefault(-1);
   }
 };

+class Conv2DFusionOpInferShape : public framework::InferShapeBase {
+ public:
+  void operator()(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("Input"),
+                   "Input(Input) of ConvOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Filter"),
+                   "Input(Filter) of ConvOp should not be null.");
+    auto in_dims = ctx->GetInputDim("Input");
+    auto filter_dims = ctx->GetInputDim("Filter");
+
+    std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
+    std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
+    std::vector<int> dilations =
+        ctx->Attrs().Get<std::vector<int>>("dilations");
+
+    std::vector<int64_t> oshape({in_dims[0], filter_dims[0]});
+    for (size_t i = 0; i < strides.size(); ++i) {
+      oshape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
+                                      dilations[i], paddings[i], strides[i]));
+    }
+    PADDLE_ENFORCE(ctx->HasOutput("Output"),
+                   "Output(Output) of ConvOp should not be null.");
+    ctx->SetOutputDim("Output", framework::make_ddim(oshape));
+    std::vector<int> channels =
+        ctx->Attrs().Get<std::vector<int>>("split_channels");
+    if (channels.size()) {
+      PADDLE_ENFORCE(ctx->HasOutputs("Outputs"),
+                     "Output(Outputs) of ConvOp should not be null.");
+      std::vector<framework::DDim> oshapes;
+      oshapes.reserve(channels.size());
+      for (size_t i = 0; i < channels.size(); ++i) {
+        oshapes.push_back({oshape[0], channels[i], oshape[2], oshape[3]});
+      }
+      ctx->SetOutputsDim("Outputs", oshapes);
+    }
+  }
+};
+
 // TODO(qingqing): add gradient operator for conv2d_fusion
 }  // namespace operators
@@ -45,4 +104,5 @@ class Conv2DFusionOpMaker : public Conv2DOpMaker {
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(conv2d_fusion, ops::ConvOp, ops::Conv2DFusionOpMaker,
-                  ops::ConvOpInferVarType, paddle::framework::EmptyGradOpMaker);
+REGISTER_OPERATOR(conv2d_fusion, ops::ConvOp, ops::Conv2DFusionOpMaker,
+                  ops::Conv2DFusionOpInferShape, ops::ConvOpInferVarType,
+                  paddle::framework::EmptyGradOpMaker);
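Conv2DFusionOpInferShape computes each spatial output dimension with ConvOutputSize. A minimal standalone version of that formula, matching the semantics this diff relies on (dilated kernel extent, symmetric padding, floor division); the function name mirrors Paddle's helper but this is a sketch, not the library code:

#include <cassert>

// Standard convolution output-size formula.
int ConvOutputSize(int input, int filter, int dilation, int padding,
                   int stride) {
  int dilated = dilation * (filter - 1) + 1;  // effective filter extent
  return (input + 2 * padding - dilated) / stride + 1;
}

int main() {
  // A 1x1 convolution with stride 1 and no padding keeps spatial size.
  assert(ConvOutputSize(56, 1, 1, 0, 1) == 56);
  // A 3x3 convolution with dilation 1 and padding 1 also keeps spatial size.
  assert(ConvOutputSize(56, 3, 1, 1, 1) == 56);
  return 0;
}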
paddle/fluid/operators/conv_fusion_op.cu.cc
@@ -16,8 +16,9 @@ limitations under the License. */
 #include "paddle/fluid/operators/conv_cudnn_op_cache.h"
 #include "paddle/fluid/platform/cudnn_helper.h"

-DECLARE_uint64(conv_workspace_size_limit);
-DECLARE_bool(cudnn_exhaustive_search);
+DEFINE_int64(cudnn_exhaustive_search_times, -1,
+             "Exhaustive search times for cuDNN convolution, "
+             "defalut is 1, only search once.");

 namespace paddle {
 namespace operators {
@@ -117,41 +118,60 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
           workspace_size_limit, &algo));
       VLOG(3) << "cuDNN forward algo " << algo;
     } else {
+      auto search_func = [&]() {
+        int returned_algo_count;
+        std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
+            fwd_perf_stat;
+        auto cudnn_find_func = [&](void* cudnn_workspace) {
+          CUDNN_ENFORCE(
+              platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
+                  handle, cudnn_input_desc, input_data, cudnn_filter_desc,
+                  filter_data, cudnn_conv_desc, cudnn_output_desc, output_data,
+                  kNUM_CUDNN_FWD_ALGS, &returned_algo_count,
+                  fwd_perf_stat.data(), cudnn_workspace, workspace_size_limit));
+        };
+        workspace_handle.RunFunc(cudnn_find_func, workspace_size_limit);
+        VLOG(3) << "Perf result: (algo: stat, time, memory)";
+        for (int i = 0; i < returned_algo_count; ++i) {
+          const auto& stat = fwd_perf_stat[i];
+          VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " "
+                  << stat.memory;
+        }
+        return fwd_perf_stat[0].algo;
+      };
       AlgorithmsCache<cudnnConvolutionFwdAlgo_t>* algo_cache = nullptr;
-      if (ctx.scope().FindVar(kCUDNNFwdAlgoCache)) {
+      int search_times = ctx.Attr<int>("search_times");
+      search_times = std::max(
+          static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times);
+      if (search_times > 0) {
+        // The searched algo will be cached by `search_times` times for
+        // different input dimension. For other dimensions, select the algo
+        // of closest area.
+        auto var_name = ctx.Inputs("AlgoCache")[0];
         algo_cache =
             ctx.scope()
-                .FindVar(kCUDNNFwdAlgoCache)
+                .FindVar(var_name)
                 ->GetMutable<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>();
+        algo = algo_cache->GetAlgorithm(x_dims[2] * x_dims[3], search_times, 0,
+                                        search_func);
       } else {
-        algo_cache =
-            const_cast<framework::Scope&>(ctx.scope())
-                .Var(kCUDNNFwdAlgoCache)
-                ->GetMutable<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>();
+        // Cache searched algo in Var(kCUDNNFwdAlgoCache).
+        // all conv ops use the same kCUDNNFwdAlgoCache variable.
+        if (ctx.scope().FindVar(kCUDNNFwdAlgoCache)) {
+          algo_cache =
+              ctx.scope()
+                  .FindVar(kCUDNNFwdAlgoCache)
+                  ->GetMutable<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>();
+        } else {
+          // TODO(qingqing) remove const_cast
+          algo_cache =
+              const_cast<framework::Scope*>(ctx.scope().parent())
+                  ->Var(kCUDNNFwdAlgoCache)
+                  ->GetMutable<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>();
+        }
+        algo = algo_cache->GetAlgorithm(x_dims, f_dims, strides, paddings,
+                                        dilations, 0, search_func);
       }
-      algo = algo_cache->GetAlgorithm(
-          x_dims, f_dims, strides, paddings, dilations, 0, [&]() {
-            int returned_algo_count;
-            std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
-                fwd_perf_stat;
-            auto cudnn_find_func = [&](void* cudnn_workspace) {
-              CUDNN_ENFORCE(
-                  platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
-                      handle, cudnn_input_desc, input_data, cudnn_filter_desc,
-                      filter_data, cudnn_conv_desc, cudnn_output_desc,
-                      output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count,
-                      fwd_perf_stat.data(), cudnn_workspace,
-                      workspace_size_limit));
-            };
-            workspace_handle.RunFunc(cudnn_find_func, workspace_size_limit);
-            VLOG(3) << "Perf result: (algo: stat, time, memory)";
-            for (int i = 0; i < returned_algo_count; ++i) {
-              const auto& stat = fwd_perf_stat[i];
-              VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time
-                      << " " << stat.memory;
-            }
-            return fwd_perf_stat[0].algo;
-          });
       VLOG(3) << "choose algo " << algo;
     }
@@ -195,6 +215,27 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
     };
     workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
   }
+  std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels");
+  if (channels.size()) {
+    auto outs = ctx.MultiOutput<framework::Tensor>("Outputs");
+    if (x_dims[0] == 1) {
+      // share data with Output
+      framework::Tensor t;
+      t.ShareDataWith(*output);
+      auto y_dims = output->dims();
+      t.Resize({y_dims[1], y_dims[2], y_dims[3]});
+      int s = 0;
+      for (size_t i = 0; i < channels.size(); ++i) {
+        int e = s + channels[i];
+        outs[i]->ShareDataWith(t.Slice(s, e));
+        outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]});
+        s = e;
+      }
+    } else {
+      // TODO(qingiqng): do copy when batch size large than 1
+      PADDLE_THROW("Batch size greater than 1 is Unsupported");
+    }
+  }
 }
 };
 #endif
paddle/fluid/operators/distributed/collective_server_test.cc
@@ -52,12 +52,12 @@ std::unique_ptr<framework::Scope> GenerateVars(platform::Place place) {
   framework::Scope* scope = new framework::Scope();
   framework::Variable* var = scope->Var("var1");
   auto* slr = var->GetMutable<framework::SelectedRows>();
-  slr->set_height(1000);
+  slr->set_height(20000);

   auto* tensor = slr->mutable_value();
   auto* rows = slr->mutable_rows();

-  tensor->Resize(framework::make_ddim({3, 5}));
+  tensor->Resize(framework::make_ddim({20000, 1024}));
   tensor->mutable_data<float>(place);

   paddle::operators::math::set_constant(ctx, tensor, 32.7);
@@ -83,6 +83,7 @@ void Gather(const std::vector<distributed::RemoteVar>& vars,
 }

 TEST(PREFETCH, GPU) {
+  setenv("FLAGS_max_body_size", "2147483647", 1);
   platform::CUDAPlace place;
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
   auto& ctx = *pool.Get(place);
paddle/fluid/operators/fused/CMakeLists.txt
 include(operators)
-register_operators(EXCLUDES fusion_transpose_flatten_concat_op)
+register_operators(EXCLUDES fusion_transpose_flatten_concat_op fusion_conv_inception_op)
 if (WITH_GPU)
     op_library(fusion_transpose_flatten_concat_op)
+    op_library(fusion_conv_inception_op)
     file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fusion_transpose_flatten_concat);\n")
+    file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_inception_fusion);\n")
 endif()
paddle/fluid/operators/fused/fusion_conv_inception_op.cc (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ConvInceptionFusionOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    // 1 x
    auto in_dims = ctx->GetInputDim("Input");
    // 4 filters
    auto w_dims = ctx->GetInputsDim("Filter");

    PADDLE_ENFORCE(in_dims.size(), 4, "Conv intput should be 4-D tensor.");
    PADDLE_ENFORCE_EQ(w_dims.size(), 4, "There should be 4 filters");
    PADDLE_ENFORCE_EQ(w_dims[0][1], in_dims[1]);
    PADDLE_ENFORCE_EQ(w_dims[1][1], in_dims[1]);

    int n = in_dims[0];
    // compute output channel
    // 1st channel
    int c = w_dims[0][0];
    // add 2nd channel
    c += (w_dims[1][0] - w_dims[2][1] * 2);
    // add 3rd channel
    c += (w_dims[2][0] - w_dims[3][1]);
    // add 4-th channel
    c += w_dims[3][0];

    int h = in_dims[2];
    int w = in_dims[3];

    ctx->SetOutputDim("Output", {n, c, h, w});
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        ctx.Input<framework::LoDTensor>("Input")->type(),
        ctx.device_context());
  }
};

class ConvInceptionFusionOpMaker : public framework::OpProtoAndCheckerMaker {
 protected:
  void Make() override {
    AddInput("Input", "(Tensor) NCHW layout.");
    AddInput("Filter", "(vector<Tensor>) 4 aggregated filters").AsDuplicable();
    AddInput("Bias", "(vector<Tensor>) it's lenght is equal to Filter")
        .AsDuplicable();
    AddOutput("Output",
              "(Tensor) The output tensor of convolution operator. "
              "The format of output tensor is also NCHW.");
    AddOutput("TempOutput", "").AsDuplicable();
    AddAttr<std::string>(
        "pooling_type",
        "(string), pooling type, can be \"max\" for max-pooling "
        "and \"avg\" for average-pooling.")
        .InEnum({"max", "avg"});
    AddAttr<bool>(
        "exclusive",
        "(bool, default True) When true, will exclude the zero-padding in the "
        "averaging calculating, otherwise, include the zero-padding. Note, it "
        "is only used when pooling_type is avg. The defalut is True.")
        .SetDefault(true);
    AddAttr<std::string>(
        "activation",
        "The activation type can be 'identity', 'sigmoid', 'relu', 'relu6' "
        "'relux' , 'tanh', 'band_pass'")
        .SetDefault("relu");
    AddAttr<int>("workspace_size_MB",
                 "Only used in cudnn kernel. Need set use_cudnn to true."
                 "workspace size for cudnn, in MB, "
                 "workspace is a section of GPU memory which will be "
                 "allocated/freed each time the operator runs, larger "
                 "workspace size can increase performance but also requires "
                 "better hardware. This size should be chosen carefully.")
        .SetDefault(4096);
    AddComment(R"DOC(
)DOC");
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(conv2d_inception_fusion, ops::ConvInceptionFusionOp,
                  ops::ConvInceptionFusionOpMaker,
                  paddle::framework::EmptyGradOpMaker);
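The channel arithmetic in InferShape above determines how many output channels the four fused branches contribute. A worked example with made-up filter shapes [out_c, in_c, kh, kw], chosen only to be consistent with the constraints the code checks; none of these numbers come from the commit:

#include <array>
#include <cstdio>

int main() {
  // Hypothetical filter shapes [out_c, in_c, kh, kw] for the 4 branches.
  std::array<std::array<int, 4>, 4> w = {{
      {64, 192, 1, 1},   // branch 1: 1x1 conv after pooling
      {160, 192, 1, 1},  // branch 2: aggregated 1x1 convs
      {96, 32, 3, 3},    // branch 3: grouped (2-group) 3x3 conv
      {48, 96, 3, 3},    // branch 4: 3x3 conv consuming branch 3's output
  }};
  int c = w[0][0];               // 1st channel
  c += (w[1][0] - w[2][1] * 2);  // add 2nd channel
  c += (w[2][0] - w[3][1]);      // add 3rd channel
  c += w[3][0];                  // add 4-th channel
  std::printf("fused output channels: %d\n", c);  // 64 + 96 + 0 + 48 = 208
  return 0;
}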
paddle/fluid/operators/fused/fusion_conv_inception_op.cu (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/platform/cudnn_helper.h"

DECLARE_uint64(conv_workspace_size_limit);

namespace paddle {
namespace operators {
#if CUDNN_VERSION >= 7001
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using ScopedActivationDescriptor = platform::ScopedActivationDescriptor;
using DataLayout = platform::DataLayout;

using ScopedPoolingDescriptor = platform::ScopedPoolingDescriptor;
using PoolingMode = platform::PoolingMode;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;

template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;

template <typename T>
class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto* input = ctx.Input<Tensor>("Input");
    auto filters = ctx.MultiInput<framework::Tensor>("Filter");
    auto bias = ctx.MultiInput<framework::Tensor>("Bias");

    auto* output = ctx.Output<Tensor>("Output");
    auto temp_outs = ctx.MultiOutput<framework::Tensor>("TempOutput");

    const std::string pool_type = ctx.Attr<std::string>("pooling_type");
    const std::string activation = ctx.Attr<std::string>("activation");
    const bool exclusive = ctx.Attr<bool>("exclusive");

    int64_t user_workspace_size =
        static_cast<size_t>(ctx.Attr<int>("workspace_size_MB"));

    const T* input_data = input->data<T>();
    T* output_data = output->mutable_data<T>(ctx.GetPlace());
    T* temp_data = temp_outs[0]->mutable_data<T>(input->dims(), ctx.GetPlace());

    DataLayout layout = DataLayout::kNCHW;
    std::vector<int> in_dim = framework::vectorize2int(input->dims());

    // ------------------- cudnn descriptors ---------------------
    PoolingMode pooling_mode;
    if (pool_type == "max") {
      pooling_mode = PoolingMode::kMaximum;
    } else {
      pooling_mode = exclusive ? PoolingMode::kAverageExclusive
                               : (PoolingMode::kAverageInclusive);
    }
    std::vector<int> k0x0 = {0, 0};
    std::vector<int> k1x1 = {1, 1};
    std::vector<int> k1x1_2 = {1, 1};
    std::vector<int> k3x3 = {3, 3};
    ScopedPoolingDescriptor pool_desc;
    ScopedActivationDescriptor act_desc;
    ScopedTensorDescriptor out_pool_desc;
    ScopedTensorDescriptor input_desc;
    cudnnPoolingDescriptor_t cudnn_pool_desc =
        pool_desc.descriptor(pooling_mode, k3x3, k1x1, k1x1);

    cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
        layout, framework::vectorize2int(input->dims()));
    cudnnTensorDescriptor_t pool_out_desc = out_pool_desc.descriptor<T>(
        layout, framework::vectorize2int(input->dims()));

    cudnnDataType_t cudnn_dtype = CudnnDataType<T>::type;
    cudnnTensorDescriptor_t* out_desc = new cudnnTensorDescriptor_t[4];
    cudnnFilterDescriptor_t* filter_desc = new cudnnFilterDescriptor_t[4];
    cudnnTensorDescriptor_t* bias_desc = new cudnnTensorDescriptor_t[4];
    cudnnTensorDescriptor_t* in_desc = new cudnnTensorDescriptor_t[4];
    cudnnConvolutionDescriptor_t* conv_desc =
        new cudnnConvolutionDescriptor_t[4];
    for (int i = 0; i < 4; ++i) {
      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateFilterDescriptor(&filter_desc[i]));
      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateTensorDescriptor(&bias_desc[i]));
      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateTensorDescriptor(&in_desc[i]));
      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateTensorDescriptor(&out_desc[i]));
      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateConvolutionDescriptor(&conv_desc[i]));
    }

    std::vector<std::vector<int>> filter_dims;
    std::vector<std::vector<int>> bias_dims;
    std::vector<std::vector<int>> in_dims;
    std::vector<std::vector<int>> out_dims;
    std::vector<std::vector<int>> in_strides;
    std::vector<std::vector<int>> out_strides;
    std::vector<std::vector<int>> bias_strides;

    cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
    int n = in_dim[0];
    int h = in_dim[2];
    int w = in_dim[3];
    int oc = output->dims()[1];

    cudnnDataType_t compute_type = (cudnn_dtype == CUDNN_DATA_DOUBLE)
                                       ? CUDNN_DATA_DOUBLE
                                       : CUDNN_DATA_FLOAT;

    for (int i = 0; i < 4; ++i) {
      filter_dims.push_back(framework::vectorize2int(filters[i]->dims()));
      CUDNN_ENFORCE(platform::dynload::cudnnSetFilterNdDescriptor(
          filter_desc[i], cudnn_dtype, format, 4, filter_dims[i].data()));
      bias_dims.push_back({1, filter_dims[i][0], 1, 1});
      bias_strides.push_back({filter_dims[i][0], 1, 1, 1});
      CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor(
          bias_desc[i], cudnn_dtype, 4, bias_dims[i].data(),
          bias_strides[i].data()));
      in_dims.push_back({n, filter_dims[i][1], h, w});
      out_dims.push_back({n, filter_dims[i][0], h, w});
      in_strides.push_back({filter_dims[i][1] * h * w, h * w, w, 1});
      out_strides.push_back({oc * h * w, h * w, w, 1});

      if (i < 2) {
        CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionNdDescriptor(
            conv_desc[i], 2, k0x0.data(), k1x1.data(), k1x1.data(),
            CUDNN_CROSS_CORRELATION, compute_type));
      } else {
        CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionNdDescriptor(
            conv_desc[i], 2, k1x1.data(), k1x1.data(), k1x1.data(),
            CUDNN_CROSS_CORRELATION, compute_type));
      }
      CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType(
          conv_desc[i], CUDNN_DEFAULT_MATH));
    }
    in_dims[2][1] *= 2;
    in_strides[2][0] = oc * h * w;
    out_strides[2][0] = filter_dims[2][0] * h * w;
    // this out is continuous.
    in_strides[3][0] = filter_dims[2][0] * h * w;
    CUDNN_ENFORCE(
        platform::dynload::cudnnSetConvolutionGroupCount(conv_desc[2], 2));

    cudnnConvolutionFwdAlgo_t algo[4];
    auto handle = dev_ctx.cudnn_handle();
    size_t workspace_size_in_bytes = 0;  // final workspace to allocate.

    size_t workspace_size_limit = kCONV_CUDNN_WORKSPACE_LIMIT_BYTES;
    if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
      int64_t max_user_size =
          std::max(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
                   user_workspace_size);
      workspace_size_limit = max_user_size * 1024 * 1024;
    }

    for (int i = 0; i < 4; ++i) {
      CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor(
          in_desc[i], cudnn_dtype, 4, in_dims[i].data(),
          in_strides[i].data()));
      CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor(
          out_desc[i], cudnn_dtype, 4, out_dims[i].data(),
          out_strides[i].data()));
      CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm(
          handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i],
          CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit,
          &algo[i]));
      size_t tmp_size = 0;
      CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
          handle, in_desc[i], filter_desc[i], conv_desc[i], out_desc[i],
          algo[i], &tmp_size));
      workspace_size_in_bytes = std::max(workspace_size_in_bytes, tmp_size);
    }
    cudnnActivationDescriptor_t cudnn_act_desc =
        act_desc.descriptor<T>(activation);

    int oc0 = filter_dims[0][0];
    int oc1 = filter_dims[1][0] - filter_dims[2][1] * 2;
    int oc3 = filter_dims[3][0];
    int oc2 = oc - oc0 - oc1 - oc3;

    // branch1: pool + 1x1 conv
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    CUDNN_ENFORCE(platform::dynload::cudnnPoolingForward(
        handle, cudnn_pool_desc, &alpha, cudnn_input_desc, input_data, &beta,
        pool_out_desc, temp_data));

    std::vector<const void*> in_datas;
    in_datas.push_back(static_cast<const void*>(temp_data));
    in_datas.push_back(static_cast<const void*>(input_data));
    in_datas.push_back(
        static_cast<const void*>(output_data + (oc0 + oc1) * h * w));
    T* temp2_data = temp_outs[1]->mutable_data<T>(
        framework::make_ddim(out_dims[2]), ctx.GetPlace());
    in_datas.push_back(static_cast<const void*>(temp2_data + oc2 * h * w));

    std::vector<void*> out_datas;
    out_datas.
push_back
(
static_cast
<
void
*>
(
output_data
));
out_datas
.
push_back
(
static_cast
<
void
*>
(
output_data
+
oc0
*
h
*
w
));
out_datas
.
push_back
(
static_cast
<
void
*>
(
temp2_data
));
out_datas
.
push_back
(
static_cast
<
void
*>
(
output_data
+
(
oc0
+
oc1
+
oc2
)
*
h
*
w
));
for
(
int
i
=
0
;
i
<
4
;
++
i
)
{
auto
func
=
[
&
](
void
*
cudnn_workspace
)
{
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnConvolutionBiasActivationForward
(
handle
,
&
alpha
,
in_desc
[
i
],
in_datas
[
i
],
filter_desc
[
i
],
static_cast
<
const
void
*>
(
filters
[
i
]
->
data
<
T
>
()),
conv_desc
[
i
],
algo
[
i
],
cudnn_workspace
,
workspace_size_in_bytes
,
&
beta
,
out_desc
[
i
],
out_datas
[
i
],
bias_desc
[
i
],
static_cast
<
const
void
*>
(
bias
[
i
]
->
data
<
T
>
()),
cudnn_act_desc
,
out_desc
[
i
],
out_datas
[
i
]));
};
auto
workspace_handle
=
dev_ctx
.
cudnn_workspace_handle
();
workspace_handle
.
RunFunc
(
func
,
workspace_size_in_bytes
);
}
cudnnTensorDescriptor_t
x_desc
;
cudnnTensorDescriptor_t
y_desc
;
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnCreateTensorDescriptor
(
&
x_desc
));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnCreateTensorDescriptor
(
&
y_desc
));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnSetTensorNdDescriptor
(
x_desc
,
cudnn_dtype
,
4
,
out_dims
[
3
].
data
(),
out_strides
[
2
].
data
()));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnSetTensorNdDescriptor
(
y_desc
,
cudnn_dtype
,
4
,
out_dims
[
3
].
data
(),
out_strides
[
3
].
data
()));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnTransformTensor
(
handle
,
CudnnDataType
<
T
>::
kOne
(),
x_desc
,
static_cast
<
const
void
*>
(
out_datas
[
2
]),
CudnnDataType
<
T
>::
kZero
(),
y_desc
,
static_cast
<
void
*>
(
output_data
+
(
oc0
+
oc1
)
*
h
*
w
)));
for
(
int
i
=
0
;
i
<
4
;
++
i
)
{
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyTensorDescriptor
(
in_desc
[
i
]));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyTensorDescriptor
(
out_desc
[
i
]));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyFilterDescriptor
(
filter_desc
[
i
]));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyTensorDescriptor
(
bias_desc
[
i
]));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyConvolutionDescriptor
(
conv_desc
[
i
]));
}
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyTensorDescriptor
(
x_desc
));
CUDNN_ENFORCE
(
platform
::
dynload
::
cudnnDestroyTensorDescriptor
(
y_desc
));
}
};
#endif
}
// namespace operators
}
// namespace paddle
#if CUDNN_VERSION >= 7001
namespace
ops
=
paddle
::
operators
;
REGISTER_OP_CUDA_KERNEL
(
conv2d_inception_fusion
,
ops
::
CUDNNConvInceptionFusionOpKernel
<
float
>
,
ops
::
CUDNNConvInceptionFusionOpKernel
<
double
>
);
#endif
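To make the fused kernel's data flow easier to follow, here is a rough, unfused Python sketch of the kind of inception block it computes: a 3x3 pool feeding a 1x1 conv, a direct 1x1 conv, and a two-stage 3x3 path, all concatenated on the channel axis. The branch widths oc0..oc3 and the exact wiring are illustrative only; the real branch topology is encoded in the descriptors set up above, and the fused kernel writes each branch straight into its channel slice instead of running a separate concat.

import paddle.fluid as fluid

def inception_block_sketch(x, oc0, oc1, oc2, oc3):
    # branch 0: 3x3 max-pool followed by a 1x1 conv (cf. cudnnPoolingForward + conv 0)
    pooled = fluid.layers.pool2d(x, pool_size=3, pool_stride=1,
                                 pool_padding=1, pool_type='max')
    b0 = fluid.layers.conv2d(pooled, num_filters=oc0, filter_size=1, act='relu')
    # branch 1: plain 1x1 conv on the raw input (cf. conv 1)
    b1 = fluid.layers.conv2d(x, num_filters=oc1, filter_size=1, act='relu')
    # branches 2/3: stacked 3x3 convs (cf. the grouped conv 2 and conv 3)
    b2 = fluid.layers.conv2d(x, num_filters=oc2, filter_size=3, padding=1, act='relu')
    b3 = fluid.layers.conv2d(b2, num_filters=oc3, filter_size=3, padding=1, act='relu')
    # the unfused equivalent of the fused kernel's in-place slice writes
    return fluid.layers.concat([b0, b1, b2, b3], axis=1)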
paddle/fluid/platform/CMakeLists.txt
@@ -84,6 +84,9 @@ cc_test(init_test SRCS init_test.cc DEPS device_context)
 nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
 nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context)
+cc_library(timer SRCS timer.cc)
+cc_test(timer_test SRCS timer_test.cc DEPS timer)
 cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS})
 cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
 cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)
paddle/fluid/platform/dynload/cudnn.cc
@@ -38,6 +38,10 @@ CUDNN_DNN_ROUTINE_EACH_AFTER_R4(DEFINE_WRAP);
 CUDNN_DNN_ROUTINE_EACH_R5(DEFINE_WRAP);
 #endif
 
+#ifdef CUDNN_DNN_ROUTINE_EACH_R6
+CUDNN_DNN_ROUTINE_EACH_R6(DEFINE_WRAP);
+#endif
+
 #ifdef CUDNN_DNN_ROUTINE_EACH_R7
 CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP);
 #endif
paddle/fluid/platform/dynload/dynamic_loader.cc
@@ -53,6 +53,12 @@ namespace platform {
 namespace dynload {
 
 static constexpr char cupti_lib_path[] = CUPTI_LIB_PATH;
 
+#if defined(_WIN32) && defined(PADDLE_WITH_CUDA)
+static constexpr char* win_cublas_lib = "cublas64_" PADDLE_CUDA_BINVER ".dll";
+static constexpr char* win_curand_lib = "curand64_" PADDLE_CUDA_BINVER ".dll";
+static constexpr char* win_cudnn_lib = "cudnn64_" PADDLE_CUDNN_BINVER ".dll";
+#endif
+
 static inline std::string join(const std::string& part1,
                                const std::string& part2) {
   // directory separator
@@ -165,6 +171,8 @@ static inline void* GetDsoHandleFromSearchPath(const std::string& search_root,
 void* GetCublasDsoHandle() {
 #if defined(__APPLE__) || defined(__OSX__)
   return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.dylib");
+#elif defined(_WIN32) && defined(PADDLE_WITH_CUDA)
+  return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, win_cublas_lib);
 #else
   return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.so");
 #endif
@@ -173,6 +181,8 @@ void* GetCublasDsoHandle() {
 void* GetCUDNNDsoHandle() {
 #if defined(__APPLE__) || defined(__OSX__)
   return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", false);
+#elif defined(_WIN32) && defined(PADDLE_WITH_CUDA)
+  return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, win_cudnn_lib);
 #else
   return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", false);
 #endif
@@ -193,6 +203,8 @@ void* GetCUPTIDsoHandle() {
 void* GetCurandDsoHandle() {
 #if defined(__APPLE__) || defined(__OSX__)
   return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib");
+#elif defined(_WIN32) && defined(PADDLE_WITH_CUDA)
+  return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, win_curand_lib);
 #else
   return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.so");
 #endif
paddle/fluid/platform/timer.cc
0 → 100644
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/timer.h"
namespace paddle {
namespace platform {

void Timer::Reset() {
  _start.tv_sec = 0;
  _start.tv_usec = 0;

  _count = 0;
  _elapsed = 0;
  _paused = true;
}

void Timer::Start() {
  Reset();
  Resume();
}

void Timer::Pause() {
  if (_paused) {
    return;
  }
  _elapsed += Tickus();
  ++_count;
  _paused = true;
}

void Timer::Resume() {
  gettimeofday(&_start, NULL);
  _paused = false;
}

int Timer::Count() { return _count; }

double Timer::ElapsedUS() { return static_cast<double>(_elapsed); }

double Timer::ElapsedMS() { return _elapsed / 1000.0; }

double Timer::ElapsedSec() { return _elapsed / 1000000.0; }

int64_t Timer::Tickus() {
  gettimeofday(&_now, NULL);
  return (_now.tv_sec - _start.tv_sec) * 1000 * 1000L +
         (_now.tv_usec - _start.tv_usec);
}

}  // namespace platform
}  // namespace paddle
paddle/fluid/platform/timer.h
0 → 100644
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <stdlib.h>
#include "paddle/fluid/platform/port.h"
#ifdef _WIN32
static unsigned sleep(unsigned seconds) {
  Sleep(seconds * 1000);
  return 0;
}
#endif

namespace paddle {
namespace platform {

// A Standard Timer implementation for debugging
class Timer {
 public:
  // a timer class for profiling
  // Reset() will be called during initialization
  // all timing variables will be set 0 in Reset()
  Timer() { Reset(); }
  void Reset();
  void Start();
  void Pause();
  // Resume will get current system time
  void Resume();
  int Count();
  // return elapsed time in us
  double ElapsedUS();
  // return elapsed time in ms
  double ElapsedMS();
  // return elapsed time in sec
  double ElapsedSec();

 private:
  struct timeval _start;
  struct timeval _now;
  int _count;
  int _elapsed;
  bool _paused;

  // get us difference between start and now
  int64_t Tickus();
};

}  // namespace platform
}  // namespace paddle
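For readers following along in Python, a minimal sketch of the pause/resume accumulation that this Timer implements (a hypothetical helper for illustration, not part of the Paddle API):

import time

class PyTimer(object):
    # Mirrors platform::Timer: pause() adds the microseconds since the
    # last resume() to a running total and bumps the pause count.
    def __init__(self):
        self.reset()

    def reset(self):
        self._start = 0.0     # timestamp of the last resume, in seconds
        self._count = 0       # number of completed pause() calls
        self._elapsed = 0.0   # accumulated time, in microseconds
        self._paused = True

    def start(self):
        self.reset()
        self.resume()

    def resume(self):
        self._start = time.time()
        self._paused = False

    def pause(self):
        if self._paused:
            return
        self._elapsed += (time.time() - self._start) * 1e6
        self._count += 1
        self._paused = True

    def elapsed_ms(self):
        return self._elapsed / 1000.0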
paddle/fluid/platform/timer_test.cc
0 → 100644
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/platform/timer.h"
#include "gtest/gtest.h"
TEST(Timer, Reset) {
  paddle::platform::Timer timeline;
  timeline.Start();
  sleep(3);
  timeline.Pause();
  timeline.Reset();
}

TEST(Timer, Start) {
  paddle::platform::Timer timeline;
  timeline.Start();
  sleep(3);
  timeline.Pause();
}

TEST(Timer, Pause) {
  paddle::platform::Timer timeline;
  timeline.Start();
  sleep(3);
  timeline.Pause();
}

TEST(Timer, Resume) {
  paddle::platform::Timer timeline;
  timeline.Start();
  sleep(3);
  timeline.Pause();
  timeline.Resume();
}
paddle/fluid/pybind/pybind.cc
@@ -84,11 +84,15 @@ bool IsCompiledWithCUDA() {
 }
 
 bool IsCompiledWithBrpc() {
-#if defined(PADDLE_WITH_BRPC) || defined(PADDLE_WITH_BRPC_RDMA)
-  return true;
-#else
+#ifndef PADDLE_WITH_DISTRIBUTE
   return false;
 #endif
+
+#ifdef PADDLE_WITH_GRPC
+  return false;
+#endif
+
+  return true;
 }
 
 bool IsCompiledWithDIST() {
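From Python the same predicate is exposed on the core module, so the effect of this change can be checked directly (assuming a built Paddle):

import paddle.fluid as fluid

# True only when the build uses the brpc RPC backend (not grpc).
print(fluid.core.is_compiled_with_brpc())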
paddle/testing/paddle_gtest_main.cc
@@ -28,20 +28,53 @@ int main(int argc, char** argv) {
   for (int i = 0; i < argc; ++i) {
     new_argv.push_back(argv[i]);
   }
+
+  std::vector<std::string> envs;
+  std::vector<std::string> undefok;
+#if defined(PADDLE_WITH_DISTRIBUTE) && !defined(PADDLE_WITH_GRPC)
+  envs.push_back("max_body_size");
+#endif
+
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  new_argv.push_back(
-      strdup("--tryfromenv=fraction_of_gpu_memory_to_use,allocator_strategy"));
+  envs.push_back("fraction_of_gpu_memory_to_use");
+  envs.push_back("allocator_strategy");
 #elif __clang__
-  new_argv.push_back(
-      strdup("--tryfromenv=use_mkldnn,initial_cpu_memory_in_"
-             "mb,allocator_strategy"));
-  new_argv.push_back(strdup("--undefok=use_mkldnn,initial_cpu_memory_in_mb"));
+  envs.push_back("use_mkldnn");
+  envs.push_back("initial_cpu_memory_in_mb");
+  envs.push_back("allocator_strategy");
+
+  undefok.push_back("use_mkldnn");
+  undefok.push_back("initial_cpu_memory_in_mb");
 #else
-  new_argv.push_back(
-      strdup("--tryfromenv=use_pinned_memory,use_mkldnn,initial_cpu_memory_in_"
-             "mb,allocator_strategy"));
-  new_argv.push_back(strdup("--undefok=use_mkldnn,initial_cpu_memory_in_mb"));
+  envs.push_back("use_pinned_memory");
+  envs.push_back("use_mkldnn");
+  envs.push_back("initial_cpu_memory_in_mb");
+  envs.push_back("allocator_strategy");
+
+  undefok.push_back("use_mkldnn");
+  undefok.push_back("initial_cpu_memory_in_mb");
 #endif
+
+  if (envs.size() > 0) {
+    std::string env_string = "--tryfromenv=";
+    for (auto t : envs) {
+      env_string += t + ",";
+    }
+    env_string = env_string.substr(0, env_string.length() - 1);
+    new_argv.push_back(strdup(env_string.c_str()));
+    VLOG(1) << "gtest env_string:" << env_string;
+  }
+
+  if (undefok.size() > 0) {
+    std::string undefok_string = "--undefok=";
+    for (auto t : undefok) {
+      undefok_string += t + ",";
+    }
+    undefok_string = undefok_string.substr(0, undefok_string.length() - 1);
+    new_argv.push_back(strdup(undefok_string.c_str()));
+    VLOG(1) << "gtest undefok_string:" << undefok_string;
+  }
+
   int new_argc = static_cast<int>(new_argv.size());
   char** new_argv_address = new_argv.data();
   google::ParseCommandLineFlags(&new_argc, &new_argv_address, false);
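The string assembly above is plain gflags plumbing: --tryfromenv=a,b tells gflags to read each listed flag from a FLAGS_<name> environment variable if present, and --undefok=a,b keeps unknown names from aborting the run. A one-line Python rendering of the same join (flag names copied from the non-CUDA branch above):

envs = ["use_pinned_memory", "use_mkldnn", "initial_cpu_memory_in_mb", "allocator_strategy"]
undefok = ["use_mkldnn", "initial_cpu_memory_in_mb"]
print("--tryfromenv=" + ",".join(envs))    # --tryfromenv=use_pinned_memory,use_mkldnn,...
print("--undefok=" + ",".join(undefok))    # --undefok=use_mkldnn,initial_cpu_memory_in_mb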
python/paddle/fluid/__init__.py
@@ -151,12 +151,21 @@ def __bootstrap__():
         read_env_flags.append('rpc_get_thread_num')
         read_env_flags.append('rpc_prefetch_thread_num')
         read_env_flags.append('rpc_disable_reuse_port')
+        if core.is_compiled_with_brpc():
+            read_env_flags.append('max_body_size')
+            #set brpc max body size
+            os.environ['FLAGS_max_body_size'] = "2147483647"
 
     if core.is_compiled_with_cuda():
         read_env_flags += [
-            'fraction_of_gpu_memory_to_use', 'cudnn_deterministic',
-            'enable_cublas_tensor_op_math', 'conv_workspace_size_limit',
-            'cudnn_exhaustive_search', 'memory_optimize_debug', 'selected_gpus'
+            'fraction_of_gpu_memory_to_use',
+            'cudnn_deterministic',
+            'enable_cublas_tensor_op_math',
+            'conv_workspace_size_limit',
+            'cudnn_exhaustive_search',
+            'memory_optimize_debug',
+            'selected_gpus',
+            'cudnn_exhaustive_search_times',
         ]
     core.init_gflags([sys.argv[0]] +
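Because __bootstrap__ reads these flags from the environment at import time, they must be exported before paddle.fluid is first imported. A hedged sketch (the flag values are illustrative):

import os

os.environ['FLAGS_conv_workspace_size_limit'] = '1024'     # MB
os.environ['FLAGS_cudnn_exhaustive_search'] = '1'
os.environ['FLAGS_cudnn_exhaustive_search_times'] = '3'    # the flag added above

import paddle.fluid as fluid  # the FLAGS_* variables are consumed here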
python/paddle/fluid/data_feeder.py
@@ -272,8 +272,7 @@ class DataFeeder(object):
             dict: the result of conversion.
 
         Raises:
-            ValueError: If drop_last is False and the data batch which cannot
-                fit for devices.
+            ValueError: If drop_last is False and the data batch which cannot fit for devices.
         """
 
         def __reader_creator__():
python/paddle/fluid/framework.py
@@ -647,20 +647,16 @@ class Operator(object):
                     self.desc.set_input(in_proto.name, [])
 
             if outputs is not None:
-                given = set()
-                need = set()
-                for n in outputs:
-                    given.add(n)
                 for m in proto.outputs:
-                    need.add(m.name)
-                if not given == need:
-                    raise ValueError(("Incorrect setting for output(s) of "
-                                      "operator \"%s\". Need: [%s] Given: [%s]") %
-                                     (type,
-                                      ", ".join(six.binary_type(e) for e in need),
-                                      ", ".join(six.binary_type(e) for e in given)))
+                    if (m.name not in outputs) and m.dispensable:
+                        continue
+                    if not ((m.name in outputs) or m.dispensable):
+                        raise ValueError(
+                            ("Incorrect setting for output(s) of "
+                             "operator \"%s\", should set: [%s].") % (type, m.name))
                 for out_proto in proto.outputs:
+                    if out_proto.name not in outputs:
+                        continue
                     out_args = outputs[out_proto.name]
                     if not isinstance(out_args, list):
                         out_args = [out_args]
@@ -1638,8 +1634,8 @@ class Program(object):
             parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need
             to print.
 
-        Returns
-            (str): The debug string.
+        Returns:
+            str: The debug string.
 
         Raises:
             ValueError: If any of required fields is not set and throw_on_error is
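The effect of the tightened check is that omitting a required (non-dispensable) output now fails at op-construction time rather than later. A small sketch of what raises under the new code (the op choice and variable shapes are illustrative):

import paddle.fluid as fluid

block = fluid.default_main_program().global_block()
x = block.create_var(name='x', shape=[2, 3], dtype='float32')
y = block.create_var(name='y', shape=[3, 4], dtype='float32')
try:
    # 'mul' declares a required output 'Out'; passing no outputs now raises.
    block.append_op(type='mul', inputs={'X': x, 'Y': y}, outputs={})
except ValueError as e:
    print(e)  # Incorrect setting for output(s) of operator "mul", should set: [Out].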
python/paddle/fluid/layers/control_flow.py
@@ -1452,6 +1452,7 @@ class DynamicRNN(object):
     def step_input(self, x):
         """
         Mark a sequence as a dynamic RNN input.
+
         Args:
             x(Variable): The input sequence.
@@ -1505,6 +1506,7 @@ class DynamicRNN(object):
         """
         Mark a variable as a RNN input. The input will not be scattered into
         time steps.
+
         Args:
             x(Variable): The input variable.
@@ -1629,13 +1631,11 @@ class DynamicRNN(object):
         Args:
             init(Variable|None): The initialized variable.
-            shape(list|tuple): The memory shape. NOTE the shape does not contain
-                batch_size.
+            shape(list|tuple): The memory shape. NOTE the shape does not contain batch_size.
             value(float): the initalized value.
-            need_reorder(bool): True if the initialized memory depends on the
-                input sample.
+            need_reorder(bool): True if the initialized memory depends on the input sample.
             dtype(str|numpy.dtype): The data type of the initialized memory.
@@ -1714,6 +1714,7 @@ class DynamicRNN(object):
         """
         Update the memory from ex_mem to new_mem. NOTE that the shape and data
         type of :code:`ex_mem` and :code:`new_mem` must be same.
+
         Args:
             ex_mem(Variable): the memory variable.
             new_mem(Variable): the plain variable generated in RNN block.
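A compact usage sketch of the step_input/memory/update_memory trio documented above (the data layer and sizes are illustrative):

import paddle.fluid as fluid

sentence = fluid.layers.data(name='sentence', shape=[32], dtype='float32', lod_level=1)
drnn = fluid.layers.DynamicRNN()
with drnn.block():
    word = drnn.step_input(sentence)             # one time step per iteration
    prev = drnn.memory(shape=[200], value=0.0)   # note: shape has no batch_size
    hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
    drnn.update_memory(prev, hidden)             # ex_mem/new_mem shapes must match
    drnn.output(hidden)
rnn_out = drnn()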
python/paddle/fluid/layers/detection.py
@@ -65,7 +65,7 @@ def rpn_target_assign(bbox_pred,
                       rpn_negative_overlap=0.3,
                       use_random=True):
     """
     **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
 
     This layer can be, for given the Intersection-over-Union (IoU) overlap
     between anchors and ground truth boxes, to assign classification and
@@ -135,19 +135,20 @@ def rpn_target_assign(bbox_pred,
     Examples:
         .. code-block:: python
 
             bbox_pred = layers.data(name='bbox_pred', shape=[100, 4],
                             append_batch_size=False, dtype='float32')
             cls_logits = layers.data(name='cls_logits', shape=[100, 1],
                             append_batch_size=False, dtype='float32')
             anchor_box = layers.data(name='anchor_box', shape=[20, 4],
                             append_batch_size=False, dtype='float32')
             gt_boxes = layers.data(name='gt_boxes', shape=[10, 4],
                             append_batch_size=False, dtype='float32')
             loc_pred, score_pred, loc_target, score_target, bbox_inside_weight =
                 fluid.layers.rpn_target_assign(bbox_pred=bbox_pred,
                                                cls_logits=cls_logits,
                                                anchor_box=anchor_box,
                                                gt_boxes=gt_boxes)
 
     """
     helper = LayerHelper('rpn_target_assign', **locals())
@@ -1519,27 +1520,30 @@ def anchor_generator(input,
     Args:
         input(Variable): The input feature map, the format is NCHW.
         anchor_sizes(list|tuple|float): The anchor sizes of generated anchors,
             given in absolute pixels e.g. [64., 128., 256., 512.].
             For instance, the anchor size of 64 means the area of this anchor equals to 64**2.
         aspect_ratios(list|tuple|float): The height / width ratios of generated
             anchors, e.g. [0.5, 1.0, 2.0].
         variance(list|tuple): The variances to be used in box regression deltas.
             Default:[0.1, 0.1, 0.2, 0.2].
-        stride(list|turple): The anchors stride across width and height,
-            e.g. [16.0, 16.0]
+        stride(list|turple): The anchors stride across width and height, e.g. [16.0, 16.0]
         offset(float): Prior boxes center offset. Default: 0.5
         name(str): Name of the prior box op. Default: None.
 
     Returns:
-        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
-            H is the height of input, W is the width of input,
-            num_anchors is the box count of each position.
-            Each anchor is in (xmin, ymin, xmax, ymax) format an unnormalized.
-        Variances(Variable): The expanded variances of anchors
-            with a layout of [H, W, num_priors, 4].
-            H is the height of input, W is the width of input
-            num_anchors is the box count of each position.
-            Each variance is in (xcenter, ycenter, w, h) format.
+        Anchors(Variable),Variances(Variable):
+
+        two variables:
+
+        - Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4]. \
+          H is the height of input, W is the width of input, \
+          num_anchors is the box count of each position. \
+          Each anchor is in (xmin, ymin, xmax, ymax) format an unnormalized.
+        - Variances(Variable): The expanded variances of anchors \
+          with a layout of [H, W, num_priors, 4]. \
+          H is the height of input, W is the width of input \
+          num_anchors is the box count of each position. \
+          Each variance is in (xcenter, ycenter, w, h) format.
 
     Examples:
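The docstring's own example is elided in this view; here is a usage sketch consistent with the documented signature and the two return values (the feature-map shape is illustrative):

import paddle.fluid as fluid

conv = fluid.layers.data(name='conv', shape=[3, 48, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
    input=conv,
    anchor_sizes=[64, 128, 256, 512],
    aspect_ratios=[0.5, 1.0, 2.0],
    variance=[0.1, 0.1, 0.2, 0.2],
    stride=[16.0, 16.0],
    offset=0.5)
# anchor: [H, W, num_anchors, 4]; var: the matching expanded variances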
@@ -1748,35 +1752,35 @@ def generate_proposals(scores,
                        eta=1.0,
                        name=None):
     """
     **Generate proposal Faster-RCNN**
 
     This operation proposes RoIs according to each box with their probability to be a foreground object and
     the box can be calculated by anchors. Bbox_deltais and scores to be an object are the output of RPN. Final proposals
     could be used to train detection net.
 
     For generating proposals, this operation performs following steps:
 
     1. Transposes and resizes scores and bbox_deltas in size of (H*W*A, 1) and (H*W*A, 4)
     2. Calculate box locations as proposals candidates.
     3. Clip boxes to image
     4. Remove predicted boxes with small area.
     5. Apply NMS to get final proposals as output.
 
     Args:
         scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents the probability for each box to be an object.
             N is batch size, A is number of anchors, H and W are height and width of the feature map.
         bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W] represents the differece between predicted box locatoin and anchor location.
         im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin image information for N batch. Info contains height, width and scale
             between origin image size and the size of feature map.
         anchors(Variable): A 4-D Tensor represents the anchors with a layout of [H, W, A, 4]. H and W are height and width of the feature map,
             num_anchors is the box count of each position. Each anchor is in (xmin, ymin, xmax, ymax) format an unnormalized.
         variances(Variable): The expanded variances of anchors with a layout of [H, W, num_priors, 4]. Each variance is in (xcenter, ycenter, w, h) format.
         pre_nms_top_n(float): Number of total bboxes to be kept per image before NMS. 6000 by default.
         post_nms_top_n(float): Number of total bboxes to be kept per image after NMS. 1000 by default.
         nms_thresh(float): Threshold in NMS, 0.5 by default.
         min_size(float): Remove predicted boxes with either height or width < min_size. 0.1 by default.
         eta(float): Apply in adaptive NMS, if adaptive threshold > 0.5, adaptive_threshold = adaptive_threshold * eta in each iteration.
 
     """
     helper = LayerHelper('generate_proposals', **locals())
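A hedged sketch wiring generate_proposals to the shapes documented above (N=1 image, A=9 anchors per location, a 32x32 feature map; all sizes illustrative):

import paddle.fluid as fluid

scores = fluid.layers.data(name='scores', shape=[1, 9, 32, 32],
                           dtype='float32', append_batch_size=False)
bbox_deltas = fluid.layers.data(name='bbox_deltas', shape=[1, 36, 32, 32],
                                dtype='float32', append_batch_size=False)
im_info = fluid.layers.data(name='im_info', shape=[1, 3],
                            dtype='float32', append_batch_size=False)
anchors = fluid.layers.data(name='anchors', shape=[32, 32, 9, 4],
                            dtype='float32', append_batch_size=False)
variances = fluid.layers.data(name='variances', shape=[32, 32, 9, 4],
                              dtype='float32', append_batch_size=False)
rois, roi_probs = fluid.layers.generate_proposals(
    scores, bbox_deltas, im_info, anchors, variances,
    pre_nms_top_n=6000, post_nms_top_n=1000,
    nms_thresh=0.5, min_size=0.1, eta=1.0)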
python/paddle/fluid/layers/io.py
@@ -949,12 +949,11 @@ def shuffle(reader, buffer_size):
     is determined by argument buf_size.
 
     Args:
-        param reader: the original reader whose output will be shuffled.
-        type reader: callable
-        param buf_size: shuffle buffer size.
-        type buf_size: int
+        reader(callable): the original reader whose output will be shuffled.
+        buf_size(int): shuffle buffer size.
 
-        return: the new reader whose output is shuffled.
-        rtype: callable
+    Returns:
+        callable: the new reader whose output is shuffled.
     """
     return __create_unshared_decorated_reader__(
         'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})
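The buffered shuffle described above is easy to state in plain Python; this standalone sketch has the same semantics (fill a buffer of buf_size samples, emit them in random order) and is purely illustrative, not the reader op's implementation:

import random

def shuffle_sketch(reader, buf_size):
    def shuffled():
        buf = []
        for sample in reader():
            buf.append(sample)
            if len(buf) >= buf_size:
                random.shuffle(buf)
                for s in buf:
                    yield s
                buf = []
        random.shuffle(buf)   # flush the tail of the stream
        for s in buf:
            yield s
    return shuffled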
python/paddle/fluid/layers/nn.py
@@ -233,7 +233,7 @@ def fc(input,
            dimensions will be flatten to form the first dimension of the final matrix (height of
            the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to
            form the second dimension of the final matrix (width of the matrix). For example, suppose
-           `X` is a 6-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
+           `X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
            Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30].
        param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable
            parameters/weights of this layer.
@@ -502,46 +502,48 @@ def lstm(input,
    If Device is GPU, This op will use cudnn LSTM implementation
 
    A four-gate Long Short-Term Memory network with no peephole connections.
    In the forward pass the output ht and cell output ct for a given iteration can be computed from the recurrent input ht-1,
    the cell input ct-1 and the previous layer input xt given matrices W, R and biases bW, bR from the following equations:
 
-    $$ i_t = \\sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + bx_i + bh_i) $$
-    $$ f_t = \\sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + bx_f + bh_f) $$
-    $$ o_t = \\sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + bx_o + bh_o) $$
-    $$ \\tilde{c_t} = tanh(W_{cx}x_t + W_{ch}h_{t-1} + bx_c + bh_c) $$
-    $$ c_t = f_t \\odot c_{t-1} + i_t \\odot \\tilde{c_t} $$
-    $$ h_t = o_t \\odot tanh(c_t) $$
+    .. math::
+
+       i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + bx_i + bh_i)
+
+       f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + bx_f + bh_f)
+
+       o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + bx_o + bh_o)
+
+       \tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + bx_c + bh_c)
+
+       c_t &= f_t \odot c_{t-1} + i_t \odot \tilde{c_t}
+
+       h_t &= o_t \odot tanh(c_t)
 
-    - W terms denote weight matrices (e.g. $W_{ix}$ is the matrix
+    - $W$ terms denote weight matrices (e.g. $W_{ix}$ is the matrix
      of weights from the input gate to the input)
    - The b terms denote bias vectors ($bx_i$ and $bh_i$ are the input gate bias vector).
    - sigmoid is the logistic sigmoid function.
    - $i, f, o$ and $c$ are the input gate, forget gate, output gate,
      and cell activation vectors, respectively, all of which have the same size as
      the cell output activation vector $h$.
-    - The $\odot$ is the element-wise product of the vectors.
-    - `tanh` is the activation functions.
-    - $\tilde{c_t}$ is also called candidate hidden state,
+    - The :math:`\odot` is the element-wise product of the vectors.
+    - :math:`tanh` is the activation functions.
+    - :math:`\tilde{c_t}` is also called candidate hidden state,
      which is computed based on the current input and the previous hidden state.
 
-    Where sigmoid is the sigmoid operator: sigmoid(x) = 1 / (1 + e^-x), * represents a point-wise multiplication,
+    Where sigmoid is the sigmoid operator: :math:`sigmoid(x) = 1 / (1 + e^{-x})` , * represents a point-wise multiplication,
    X represensts a matrix multiplication
 
    Args:
        input (Variable): LSTM input tensor, shape MUST be ( seq_len x batch_size x input_size )
        init_h(Variable): The initial hidden state of the LSTM
                       This is a tensor with shape ( num_layers x batch_size x hidden_size)
                       if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
        init_c(Variable): The initial cell state of the LSTM.
                       This is a tensor with shape ( num_layers x batch_size x hidden_size )
                       if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
        max_len (int): max length of LSTM. the first dim of input tensor CAN NOT greater than max_len
        hidden_size (int): hidden size of the LSTM
        num_layers (int): total layers number of the LSTM
        dropout_prob(float|0.0): dropout prob, dropout ONLY work between rnn layers, NOT between time steps
@@ -556,14 +558,18 @@ def lstm(input,
 
    Returns:
-        rnn_out(Tensor): result of LSTM hidden, shape is (seq_len x batch_size x hidden_size)
-                         if is_bidirec set to True, shape will be ( seq_len x batch_sze x hidden_size*2)
-        last_h(Tensor): the hidden state of the last step of LSTM
-                        shape is ( num_layers x batch_size x hidden_size )
-                        if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size)
-        last_c(Tensor): the cell state of the last step of LSTM
-                        shape is ( num_layers x batch_size x hidden_size )
-                        if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size)
+        rnn_out(Tensor),last_h(Tensor),last_c(Tensor):
+
+        Three tensors, rnn_out, last_h, last_c:
+
+        - rnn_out is result of LSTM hidden, shape is (seq_len x batch_size x hidden_size) \
+          if is_bidirec set to True, shape will be ( seq_len x batch_sze x hidden_size*2)
+        - last_h is the hidden state of the last step of LSTM \
+          shape is ( num_layers x batch_size x hidden_size ) \
+          if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size)
+        - last_c(Tensor): the cell state of the last step of LSTM \
+          shape is ( num_layers x batch_size x hidden_size ) \
+          if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size)
 
    Examples:
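A usage sketch matching the shapes in the Args section above (seq_len=100, batch_size=20, input_size=32, a single layer; values illustrative, and the op is cuDNN-backed, so it needs a CUDA device at runtime):

import paddle.fluid as fluid

seq_len, batch_size, input_size = 100, 20, 32
hidden_size, num_layers = 150, 1

x = fluid.layers.data(name='x', shape=[seq_len, batch_size, input_size],
                      dtype='float32', append_batch_size=False)
init_h = fluid.layers.fill_constant(
    [num_layers, batch_size, hidden_size], 'float32', 0.0)
init_c = fluid.layers.fill_constant(
    [num_layers, batch_size, hidden_size], 'float32', 0.0)
rnn_out, last_h, last_c = fluid.layers.lstm(
    x, init_h, init_c, seq_len, hidden_size, num_layers)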
@@ -1220,6 +1226,8 @@ def dropout(x,
...
@@ -1220,6 +1226,8 @@ def dropout(x,
probability) the outputs of some units to zero, while others are remain
probability) the outputs of some units to zero, while others are remain
unchanged.
unchanged.
dropout op can be removed from the program to make the program more efficient.
Args:
Args:
x (Variable): The input tensor variable.
x (Variable): The input tensor variable.
dropout_prob (float): Probability of setting units to zero.
dropout_prob (float): Probability of setting units to zero.
...
@@ -1230,22 +1238,24 @@ def dropout(x,
...
@@ -1230,22 +1238,24 @@ def dropout(x,
units will be dropped. DO NOT use a fixed seed in training.
units will be dropped. DO NOT use a fixed seed in training.
name (str|None): A name for this layer(optional). If set None, the layer
name (str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
will be named automatically.
dropout_implementation(string): ['downgrade_in_infer'(defauld)|'upscale_in_train']
dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
1. downgrade_in_infer(default), downgrade the outcome at inference
train: out = input * mask
inference: out = input * dropout_prob
- train: out = input * mask
(make is a tensor same shape with input, value is 0 or 1
- inference: out = input * dropout_prob
ratio of 0 is dropout_prob)
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
2. upscale_in_train, upscale the outcome at training time
train: out = input * mask / ( 1.0 - dropout_prob )
inference: out = input
(make is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
dropout op can be removed from the program.
the program will be efficient
- train: out = input * mask / ( 1.0 - dropout_prob )
- inference: out = input
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
Returns:
Returns:
Variable: A tensor variable is the shape with `x`.
Variable: A tensor variable is the shape with `x`.
...
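A small NumPy illustration of why upscale_in_train lets inference skip the op entirely: scaling by 1/(1 - dropout_prob) during training keeps the expected activation equal to the input, so inference can return the input unchanged.

import numpy as np

np.random.seed(0)
x = np.ones((6,), dtype='float32')
dropout_prob = 0.5

# mask is 1 with probability (1 - dropout_prob), else 0
mask = (np.random.uniform(size=x.shape) >= dropout_prob).astype('float32')

train_out = x * mask / (1.0 - dropout_prob)  # upscale_in_train, training pass
infer_out = x                                # inference is the identity
# E[train_out] == x, so train and inference outputs agree in expectation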
@@ -1333,11 +1343,15 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
                         A 2-D tensor with shape [N x 1], the cross entropy loss.
 
    Raises:
-        `ValueError`: 1) the 1st dimension of `input` and `label` are not equal.
-                      2) when `soft_label == True`, and the 2nd dimension of
-                         `input` and `label` are not equal.
-                      3) when `soft_label == False`, and the 2nd dimension of
-                         `label` is not 1.
+        ValueError:
+
+                      1. the 1st dimension of ``input`` and ``label`` are not equal.
+
+                      2. when ``soft_label == True``, and the 2nd dimension of
+                         ``input`` and ``label`` are not equal.
+
+                      3. when ``soft_label == False``, and the 2nd dimension of
+                         ``label`` is not 1.
 
    Examples:
        .. code-block:: python
@@ -1457,8 +1471,8 @@ def chunk_eval(input,
    This function computes and outputs the precision, recall and
    F1-score of chunk detection.
 
    For some basics of chunking, please refer to
-    'Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>'.
+    `Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .
 
    ChunkEvalOp computes the precision, recall, and F1-score of chunk detection,
    and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
@@ -1823,7 +1837,7 @@ def conv2d(input,
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
            and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
@@ -2276,7 +2290,7 @@ def sequence_slice(input, offset, length, name=None):
    .. code-block:: text
 
        - Case:
 
            Given the input Variable **input**:
@@ -2292,7 +2306,8 @@ def sequence_slice(input, offset, length, name=None):
            out.lod = [[2, 1]],
            out.dims = (3, 2).
 
-    NOTE: The first dimension size of **input**, **offset** and **length**
-          should be equal. The **offset** should start from 0.
+    Note:
+          The first dimension size of **input**, **offset** and **length**
+          should be equal. The **offset** should start from 0.
 
    Args:
@@ -2570,12 +2585,7 @@ def adaptive_pool2d(input,
        raise ValueError(
            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")
 
-    def _is_list_or_tuple_(data):
-        return (isinstance(data, list) or isinstance(data, tuple))
-
-    if not _is_list_or_tuple_(pool_size) or len(pool_size) != 2:
-        raise ValueError(
-            "'pool_size' should be a list or tuple with length as 2.")
+    pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
 
    if pool_type == "max":
        l_type = 'max_pool2d_with_index'
@@ -2671,12 +2681,7 @@ def adaptive_pool3d(input,
        raise ValueError(
            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")
 
-    def _is_list_or_tuple_(data):
-        return (isinstance(data, list) or isinstance(data, tuple))
-
-    if not _is_list_or_tuple_(pool_size) or len(pool_size) != 3:
-        raise ValueError(
-            "'pool_size' should be a list or tuple with length as 3.")
+    pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
 
    if pool_type == "max":
        l_type = 'max_pool3d_with_index'
@@ -3013,7 +3018,7 @@ def group_norm(input,
    """
    **Group Normalization Layer**
 
-    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`
+    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
 
    Args:
        input(Variable): The input tensor variable.
@@ -3140,8 +3145,8 @@ def conv2d_transpose(input,
           H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
           W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
-           H_{out} \in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
-           W_{out} \in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
+           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
+           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
 
    Args:
        input(Variable): The input image with [N, C, H, W] format.
@@ -4673,7 +4678,7 @@ def ctc_greedy_decoder(input, blank, name=None):
                         [0.5, 0.1, 0.3, 0.1]]
 
        input.lod = [[4, 4]]
 
+        Computation:
 
        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
@@ -4704,10 +4709,10 @@ def ctc_greedy_decoder(input, blank, name=None):
        name (str): The name of this layer. It is optional.
 
    Returns:
-        Variable: CTC greedy decode result which is a 2-D tensor with shape [Lp, 1].
-                  'Lp' is the sum if all output sequences' length. If all the sequences
-                  in result were empty, the result LoDTensor will be [-1] with
-                  LoD [[]] and dims [1, 1].
+        Variable: CTC greedy decode result which is a 2-D tensor with shape [Lp, 1]. \
+                  'Lp' is the sum if all output sequences' length. If all the sequences \
+                  in result were empty, the result LoDTensor will be [-1] with \
+                  LoD [[]] and dims [1, 1].
 
    Examples:
        .. code-block:: python
@@ -5060,7 +5065,7 @@ def hsigmoid(input,
...
@@ -5060,7 +5065,7 @@ def hsigmoid(input,
"""
"""
The hierarchical sigmoid operator is used to accelerate the training
The hierarchical sigmoid operator is used to accelerate the training
process of language model. This operator organizes the classes into a
process of language model. This operator organizes the classes into a
complete binary tree, or you can use is_custom to pass your own tree to
complete binary tree, or you can use is_custom to pass your own tree to
implement hierarchical. Each leaf node represents a class(a word) and each
implement hierarchical. Each leaf node represents a class(a word) and each
internal node acts as a binary classifier. For each word there's a unique
internal node acts as a binary classifier. For each word there's a unique
path from root to it's leaf node, hsigmoid calculate the cost for each
path from root to it's leaf node, hsigmoid calculate the cost for each
...
@@ -5072,13 +5077,13 @@ def hsigmoid(input,
...
@@ -5072,13 +5077,13 @@ def hsigmoid(input,
<http://www.iro.umontreal.ca/~lisa/pointeurs/hierarchical-nnlm-aistats05.pdf>`_
<http://www.iro.umontreal.ca/~lisa/pointeurs/hierarchical-nnlm-aistats05.pdf>`_
And if you want to use the costumed tree by set 'is_custom' as true you may need to do following things first:
And if you want to use the costumed tree by set 'is_custom' as true you may need to do following things first:
1. using your word dict to build a binary tree, each leaf node should be an word of your word dict
2. build a dict to store word_id -> word's leaf to root path, we call it path_table.
3. build a dict to store word_id -> code of word's leaf to root path, we call it path_code. Code
means label of each binary classification, using 1 indicate true, 0 indicate false.
4. now, each word should has its path and code along the path, you can pass a batch of path and code
related to the same batch of inputs.
1. using your word dict to build a binary tree, each leaf node should be an word of your word dict
2. build a dict to store word_id -> word's leaf to root path, we call it path_table.
3. build a dict to store word_id -> code of word's leaf to root path, we call it path_code. Code
means label of each binary classification, using 1 indicate true, 0 indicate false.
4. now, each word should has its path and code along the path, you can pass a batch of path and code
related to the same batch of inputs.
Args:
Args:
input (Variable): The input tensor variable with shape
input (Variable): The input tensor variable with shape
...
@@ -5086,8 +5091,8 @@ def hsigmoid(input,
...
@@ -5086,8 +5091,8 @@ def hsigmoid(input,
            and :math:`D` is the feature size.
        label (Variable): The tensor variable contains labels of training data.
            It's a tensor with shape :math:`[N \\times 1]`.
        num_classes (int): The number of classes; it must not be less than 2. With the default
            tree this has to be set and should never be None under is_custom=False; when
            is_custom is True it should be the number of non-leaf nodes, which indicates the
            number of classes used by the binary classifiers.
        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
            of hsigmoid. If it is set to None or one attribute of ParamAttr, hsigmoid
...
@@ -5100,15 +5105,15 @@ def hsigmoid(input,
            is not set, the bias is initialized zero. Default: None.
        name (str|None): A name for this layer(optional). If set None, the layer
            will be named automatically. Default: None.
        path_table (Variable|None): This variable can store each batch of samples' path to root.
            It should be in leaf -> root order. path_table should have the same shape as
            path_code, and for each sample i, path_table[i] is an np.array-like structure
            whose elements are indexes into the parent nodes' weight matrix.
        path_code (Variable|None): This variable can store each batch of samples' code, where
            each code consists of the codes of its parent nodes. It should be in leaf -> root order.
        is_custom (bool|False): Use a user-defined binary tree instead of the default complete
            binary tree. If is_custom is set, you need to set path_table, path_code and
            num_classes; otherwise only num_classes should be set.
        is_sparse (bool|False): Use sparse update instead of dense update. If set, the gradient
            of W and input will be sparse.
    Returns:
...
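Putting these arguments together, a minimal call with a custom tree might look like the sketch below (a sketch only; shapes, names and the tree depth of 3 are illustrative, not from the source):

.. code-block:: python

    import paddle.fluid as fluid

    emb = fluid.layers.data(name='emb', shape=[128], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # leaf-to-root indexes and codes for each sample, precomputed from the tree
    path_table = fluid.layers.data(name='path_table', shape=[3], dtype='int64')
    path_code = fluid.layers.data(name='path_code', shape=[3], dtype='int64')
    cost = fluid.layers.hsigmoid(input=emb, label=label, num_classes=3,
                                 path_table=path_table, path_code=path_code,
                                 is_custom=True)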
@@ -5485,11 +5490,11 @@ def softmax_with_cross_entropy(logits,
    .. math::

        max_j &= \\max_{i=0}^{K}{\\text{logit}_i}

        log\\_max\\_sum_j &= \\log\\sum_{i=0}^{K}\\exp(logit_i - max_j)

        softmax_j &= \\exp(logit_j - max_j - {log\\_max\\_sum}_j)

    and then cross entropy loss is calculated by softmax and label.
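As a concrete check of these formulas, here is a small NumPy sketch of the numerically stable computation for one row of logits (illustrative only):

.. code-block:: python

    import numpy as np

    logits = np.array([2.0, 1.0, 0.1])
    max_j = np.max(logits)                                  # max_j
    log_max_sum = np.log(np.sum(np.exp(logits - max_j)))    # log_max_sum_j
    softmax = np.exp(logits - max_j - log_max_sum)          # softmax_j
    label = 0
    loss = -np.log(softmax[label])                          # cross entropy with a hard label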
...
@@ -5515,11 +5520,11 @@ def softmax_with_cross_entropy(logits,
            along with the cross entropy loss. Default: False

    Returns:
        Variable or Tuple of two Variables: Return the cross entropy loss if
        `return_softmax` is False, otherwise the tuple (loss, softmax), where
        the cross entropy loss is a 2-D tensor with shape [N x 1], and softmax
        is a 2-D tensor with shape [N x K].
    Examples:
        .. code-block:: python
...
@@ -5792,21 +5797,27 @@ def squeeze(input, axes, name=None):
    the single dimensions will be removed from the shape. If an axis is
    selected with shape entry not equal to one, an error is raised.
    For example:

    .. code-block:: text

        Case 1:

          Given
            X.shape = (1, 3, 1, 5)
          and
            axes = [0]
          we get:
            Out.shape = (3, 1, 5)

        Case 2:

          Given
            X.shape = (1, 3, 1, 5)
          and
            axes = []
          we get:
            Out.shape = (3, 5)
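The two cases mirror numpy.squeeze, which can be used to sanity-check the shapes:

.. code-block:: python

    import numpy as np

    x = np.ones((1, 3, 1, 5))
    print(np.squeeze(x, axis=0).shape)  # (3, 1, 5), as in Case 1
    print(np.squeeze(x).shape)          # (3, 5), as in Case 2 (axes = [])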
    Args:
        input (Variable): The input variable to be squeezed.
...
@@ -5842,6 +5853,9 @@ def unsqueeze(input, axes, name=None):
    Dimension indices in axes are as seen in the output tensor.

    For example:

    .. code-block:: text

        Given a tensor with shape [3, 4, 5],
        the unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
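The behaviour again matches NumPy; a quick shape check (illustrative):

.. code-block:: python

    import numpy as np

    x = np.ones((3, 4, 5))
    y = np.expand_dims(np.expand_dims(x, 0), 4)
    print(y.shape)  # (1, 3, 4, 5, 1), matching axes=[0, 4]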
...
@@ -6729,8 +6743,11 @@ def sequence_scatter(input, index, updates, name=None):
    the columns to update in each row of X.

    Here is an example:

    Given the following input:

    .. code-block:: text

        input.data = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                      [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                      [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
...
@@ -6743,7 +6760,9 @@ def sequence_scatter(input, index, updates, name=None):
        updates.lod = [[ 0, 3, 8, 12]]

    Then we have the output:

    .. code-block:: text

        out.data = [[1.3, 1.3, 1.4, 1.0, 1.0, 1.0],
                    [1.0, 1.0, 1.4, 1.3, 1.2, 1.1],
                    [1.0, 1.0, 1.3, 1.2, 1.4, 1.1]]
...
@@ -6759,7 +6778,7 @@ def sequence_scatter(input, index, updates, name=None):
        name (str|None): The output variable name. Default None.

    Returns:
        Variable: The output is a tensor with the same shape as input.

    Examples:
...
@@ -6933,7 +6952,7 @@ def mean_iou(input, label, num_classes):
    .. math::

        IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.

    The predictions are accumulated in a confusion matrix and mean-IOU
    is then calculated from it.
...
@@ -6946,9 +6965,13 @@ def mean_iou(input, label, num_classes):
        num_classes (int): The possible number of labels.

    Returns:
        mean_iou (Variable), out_wrong (Variable), out_correct (Variable):

        Three variables:

        - mean_iou : A Tensor representing the mean intersection-over-union with shape [1].
        - out_wrong: A Tensor with shape [num_classes]; the count of wrong predictions for each class.
        - out_correct: A Tensor with shape [num_classes]; the count of correct predictions for each class.
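To make the formula concrete, a NumPy sketch of per-class IOU from accumulated counts (the count values are illustrative):

.. code-block:: python

    import numpy as np

    tp = np.array([50., 30., 20.])   # true positives per class
    fp = np.array([5., 10., 2.])     # false positives per class
    fn = np.array([3., 7., 4.])      # false negatives per class
    iou = tp / (tp + fp + fn)        # IOU per class
    mean_iou = iou.mean()            # scalar, shape [1] in the layer's output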
    Examples:
...
@@ -7143,8 +7166,8 @@ def affine_grid(theta, out_shape, name=None):
    Args:
        theta (Variable): A batch of affine transform parameters with shape [N, 2, 3].
        out_shape (Variable | list | tuple): The shape of target output with format [N, C, H, W].
            ``out_shape`` can be a Variable or a list or tuple.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.
...
@@ -7157,6 +7180,7 @@ def affine_grid(theta, out_shape, name=None):
    Examples:

        .. code-block:: python

            theta = fluid.layers.data(name="x", shape=[2, 3], dtype="float32")
            out_shape = fluid.layers.data(name="y", shape=[-1], dtype="float32")
            data = fluid.layers.affine_grid(theta, out_shape)
...
@@ -7192,9 +7216,10 @@ def rank_loss(label, left, right, name=None):
def rank_loss(label, left, right, name=None):
    """
    **Rank loss layer for RankNet**
    `RankNet <http://icml.cc/2015/wp-content/uploads/2015/06/icml_ranking.pdf>`_
    is a pairwise ranking model with a training sample consisting of a pair
    of documents, A and B. Label P indicates whether A is ranked higher than B
    or not:
...
@@ -7202,16 +7227,19 @@ def rank_loss(label, left, right, name=None):
    P = {0, 1} or {0, 0.5, 1}, where 0.5 means that there is no information
    about the rank of the input pair.

    Rank loss layer takes three inputs: left ( :math:`o_i` ), right ( :math:`o_j` ) and
    label ( :math:`P_{i,j}` ). The inputs respectively represent RankNet's output scores
    for documents A and B and the value of label P. The following equation
    computes rank loss C_{i,j} from the inputs:

    .. math::

        C_{i,j} &= -\\tilde{P_{ij}} * o_{i,j} + \log(1 + e^{o_{i,j}}) \\\\

        o_{i,j} &= o_i - o_j  \\\\

        \\tilde{P_{i,j}} &= \\left \{0, 0.5, 1 \\right \} \ or \ \\left \{0, 1 \\right \}
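A direct NumPy transcription of these equations (the score values are illustrative):

.. code-block:: python

    import numpy as np

    o_i, o_j = 2.5, 1.0   # RankNet output scores for documents A and B
    p_tilde = 1.0         # label: A should rank higher than B
    o_ij = o_i - o_j
    c_ij = -p_tilde * o_ij + np.log(1.0 + np.exp(o_ij))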
    Rank loss layer takes batch inputs with size batch_size (batch_size >= 1).
...
@@ -7237,7 +7265,6 @@ def rank_loss(label, left, right, name=None):
            right = fluid.layers.data(name="right", shape=[4, 1], dtype="float32")
            out = fluid.layers.rank_loss(label, left, right)
    """
    helper = LayerHelper('rank_loss', **locals())
...
@@ -7269,7 +7296,7 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):
    .. math::

        rank\_loss = max(0, -label * (left - right) + margin)
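The same loss written out in NumPy (values are illustrative):

.. code-block:: python

    import numpy as np

    label, left, right, margin = 1.0, 0.3, 0.8, 0.1
    loss = np.maximum(0.0, -label * (left - right) + margin)  # 0.6 here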
    Args:
        label (Variable): Indicates whether the left is ranked higher than the right or not.
...
@@ -7278,12 +7305,17 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):
        margin (float): Indicates the given margin.
        name (str|None): A name for this layer (optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The ranking loss.

    Raises:
        ValueError: Any of label, left, and right is not a Variable.

    Examples:

        .. code-block:: python

           label = fluid.layers.data(name="label", shape=[4, 1], dtype="float32")
           left = fluid.layers.data(name="left", shape=[4, 1], dtype="float32")
           right = fluid.layers.data(name="right", shape=[4, 1], dtype="float32")
...
@@ -7587,7 +7619,8 @@ def prelu(x, mode, param_attr=None, name=None):
"""
"""
Equation:
Equation:
y = \max(0, x) + alpha * \min(0, x)
.. math::
y = \max(0, x) +
\\
alpha * \min(0, x)
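The equation in NumPy form with a scalar alpha, roughly what the 'all' mode computes (a sketch; per-channel and per-element modes are elided):

.. code-block:: python

    import numpy as np

    def prelu(x, alpha):
        # y = max(0, x) + alpha * min(0, x)
        return np.maximum(0, x) + alpha * np.minimum(0, x)

    print(prelu(np.array([-2.0, 3.0]), 0.25))  # [-0.5  3. ]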
    Args:
        x (Variable): The input tensor.
...
@@ -7653,8 +7686,8 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
        .. code-block:: python

            x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32")
            y = fluid.layers.brelu(x, t_min=1.0, t_max=20.0)
    """
    helper = LayerHelper('brelu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
@@ -7683,8 +7716,8 @@ def leaky_relu(x, alpha=0.02, name=None):
        .. code-block:: python

            x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32")
            y = fluid.layers.leaky_relu(x, alpha=0.01)
    """
    helper = LayerHelper('leaky_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
@@ -7712,8 +7745,8 @@ def soft_relu(x, threshold=40.0, name=None):
        .. code-block:: python

            x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32")
            y = fluid.layers.soft_relu(x, threshold=20.0)
    """
    helper = LayerHelper('soft_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
@@ -7729,23 +7762,32 @@ def flatten(x, axis=1, name=None):
"""
"""
**Flatten layer**
**Flatten layer**
Flattens the input tensor into a 2D matrix.
Flattens the input tensor into a 2D matrix.
For Example:
.. code-block:: text
Examples:
Case 1:
Case 1:
Given
Given
X.shape = (3, 100, 100, 4)
X.shape = (3, 100, 100, 4)
and
axis = 2
and
We get:
axis = 2
Out.shape = (3 * 100, 4 * 100)
We get:
Case 2:
Out.shape = (3 * 100, 4 * 100)
Given
X.shape = (3, 100, 100, 4)
Case 2:
and
axis = 0
Given
We get:
X.shape = (3, 100, 100, 4)
Out.shape = (1, 3 * 100 * 100 * 4)
and
axis = 0
We get:
Out.shape = (1, 3 * 100 * 100 * 4)
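The axis semantics reduce to a reshape into two collapsed groups of dimensions; a NumPy check of both cases:

.. code-block:: python

    import numpy as np

    x = np.ones((3, 100, 100, 4))
    out = x.reshape(int(np.prod(x.shape[:2])), -1)  # axis = 2
    print(out.shape)   # (300, 400), i.e. (3 * 100, 100 * 4)
    out0 = x.reshape(1, -1)                         # axis = 0
    print(out0.shape)  # (1, 120000)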
    Args:
        x (Variable): A tensor of rank >= axis.
...
@@ -7759,9 +7801,9 @@ def flatten(x, axis=1, name=None):
            will be named automatically.

    Returns:
        Variable: A 2D tensor with the contents of the input tensor, with input
                  dimensions up to axis flattened to the outer dimension of
                  the output and remaining input dimensions flattened into the
                  inner dimension of the output.

    Raises:
...
@@ -7801,19 +7843,23 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
    The enumerated sequence has the same 1st dimension with variable `input`, and
    the 2nd dimension is `win_size`, padded by `pad_value` if necessary in generation.
    .. code-block:: text

        Case 1:

          Input:
            X.lod = [[0, 3, 5]]
            X.data = [[1], [2], [3], [4], [5]]
            X.dims = [5, 1]

          Attrs:
            win_size = 2
            pad_value = 0

          Output:
            Out.lod = [[0, 3, 5]]
            Out.data = [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]]
            Out.dims = [5, 2]
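A small Python sketch of the windowing for the first sub-sequence [1, 2, 3] of Case 1, with win_size=2 and pad_value=0:

.. code-block:: python

    seq = [1, 2, 3]
    win_size, pad_value = 2, 0
    out = [[seq[i + k] if i + k < len(seq) else pad_value
            for k in range(win_size)]
           for i in range(len(seq))]
    print(out)  # [[1, 2], [2, 3], [3, 0]]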
    Args:
        input (Variable): The input variable which is an index sequence.
...
@@ -8896,6 +8942,7 @@ def similarity_focus(input, axis, indexes, name=None):
    SimilarityFocus Operator

    Generate a similarity focus mask with the same shape of input using the following method:

    1. Extract the 3-D tensor(here the first dimension is BatchSize) corresponding
       to the axis according to the indexes. For example, if axis=1 and indexes=[a],
       it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
...
@@ -8969,14 +9016,16 @@ def similarity_focus(input, axis, indexes, name=None):
        indexes(list): Indicating the indexes of the selected dimension.

    Returns:
        Variable: A tensor variable with the same shape and same type as the input.

    Examples:
        .. code-block:: python

            data = fluid.layers.data(
                name='data', shape=[2, 3, 2, 2], dtype='float32')
            x = fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
    """
    helper = LayerHelper('similarity_focus', **locals())
    # check attrs
...
@@ -9055,6 +9104,7 @@ def hash(input, hash_size, num_hash=1, name=None):
    Examples:
        .. code-block:: python

            word_dict = paddle.dataset.imdb.word_dict()
            x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1)
            out = fluid.layers.hash(input=x, num_hash=4, hash_size=1000)
...
@@ -9075,50 +9125,52 @@ def grid_sampler(x, grid, name=None):
def grid_sampler(x, grid, name=None):
    """
    This operation samples input X by using bilinear interpolation based on
    flow field grid, which is usually generated by :code:`affine_grid`. The grid of
    shape [N, H, W, 2] is the concatenation of (grid_x, grid_y) coordinates
    with shape [N, H, W] each, where grid_x is indexing the 4th dimension
    (in width dimension) of input data x and grid_y is indexing the 3rd
    dimension (in height dimension); the final result is the bilinear
    interpolation value of the 4 nearest corner points.
    .. code-block:: text

        Step 1:
        Get (x, y) grid coordinates and scale to [0, H-1/W-1].

        grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
        grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)

        Step 2:
        Indices input data X with grid (x, y) in each [H, W] area, and bilinear
        interpolate point value by 4 nearest points.

          wn ------- y_n ------- en
          |           |           |
          |          d_n          |
          |           |           |
         x_w --d_w-- grid--d_e-- x_e
          |           |           |
          |          d_s          |
          |           |           |
          ws ------- y_s ------- es

        x_w = floor(x)              // west side x coord
        x_e = x_w + 1               // east side x coord
        y_n = floor(y)              // north side y coord
        y_s = y_n + 1               // south side y coord

        d_w = grid_x - x_w          // distance to west side
        d_e = x_e - grid_x          // distance to east side
        d_n = grid_y - y_n          // distance to north side
        d_s = y_s - grid_y          // distance to south side

        wn = X[:, :, y_n, x_w]      // north-west point value
        en = X[:, :, y_n, x_e]      // north-east point value
        ws = X[:, :, y_s, x_w]      // south-west point value
        es = X[:, :, y_s, x_e]      // south-east point value

        output = wn * d_e * d_s + en * d_w * d_s
               + ws * d_e * d_n + es * d_w * d_n
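A NumPy sketch of the bilinear lookup for a single (x, y) location on one channel (illustrative; border clipping is elided):

.. code-block:: python

    import numpy as np

    def bilinear_sample(img, x, y):
        # img: [H, W] array; (x, y) are continuous pixel coordinates
        x_w, y_n = int(np.floor(x)), int(np.floor(y))
        x_e, y_s = x_w + 1, y_n + 1
        d_w, d_e = x - x_w, x_e - x
        d_n, d_s = y - y_n, y_s - y
        wn, en = img[y_n, x_w], img[y_n, x_e]
        ws, es = img[y_s, x_w], img[y_s, x_e]
        return wn * d_e * d_s + en * d_w * d_s + ws * d_e * d_n + es * d_w * d_n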
    Args:
        x(Variable): Input data of shape [N, C, H, W].
...
@@ -9126,16 +9178,18 @@ def grid_sampler(x, grid, name=None):
        name (str, default None): The name of this layer.

    Returns:
        Variable: Output of shape [N, C, H, W] data samples input X
                  using bilinear interpolation based on input grid.

    Examples:

        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[3, 10, 32, 32], dtype='float32')
            theta = fluid.layers.data(name='theta', shape=[3, 2, 3], dtype='float32')
            grid = fluid.layers.affine_grid(theta, out_shape=[3, 10, 32, 32])
            out = fluid.layers.grid_sampler(x=x, grid=grid)
    """
    helper = LayerHelper("grid_sampler", **locals())
...
@@ -9203,19 +9257,19 @@ def add_position_encoding(input, alpha, beta, name=None):
"""
"""
**Add Position Encoding Layer**
**Add Position Encoding Layer**
This layer accepts an input 3D-Tensor of shape [N x M x P], and return an
This layer accepts an input 3D-Tensor of shape [N x M x P], and return
s
an
output Tensor of shape [N x M x P] with positional encoding value.
output Tensor of shape [N x M x P] with positional encoding value.
Refer to `Attention Is All You Need<http://arxiv.org/pdf/1706.03762.pdf>`_ .
Refer to `Attention Is All You Need
<http://arxiv.org/pdf/1706.03762.pdf>`_ .
.. math::
.. math::
PE(pos, 2i) =
\\
sin{(pos / 10000^{2i / P})}
\\\\
PE(pos, 2i)
&
=
\\
sin{(pos / 10000^{2i / P})}
\\\\
PE(pos, 2i + 1) =
\\
cos{(pos / 10000^{2i / P})}
\\\\
PE(pos, 2i + 1)
&
=
\\
cos{(pos / 10000^{2i / P})}
\\\\
Out(:, pos, i) =
\\
alpha * input(:, pos, i) +
\\
beta * PE(pos, i)
Out(:, pos, i)
&
=
\\
alpha * input(:, pos, i) +
\\
beta * PE(pos, i)
Where:
Where:
* PE(pos, 2i)
: the increment for the number at even position
- :math:`PE(pos, 2i)`
: the increment for the number at even position
* PE(pos, 2i + 1)
: the increment for the number at odd position
- :math:`PE(pos, 2i + 1)`
: the increment for the number at odd position
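A NumPy transcription of the encoding formulas for a single batch item (a sketch; the shapes and alpha/beta values are illustrative):

.. code-block:: python

    import numpy as np

    M, P = 4, 8                          # sequence length, feature size
    pos = np.arange(M)[:, None]          # [M, 1]
    two_i = np.arange(0, P, 2)[None, :]  # even feature indices 2i
    pe = np.zeros((M, P))
    pe[:, 0::2] = np.sin(pos / 10000 ** (two_i / P))
    pe[:, 1::2] = np.cos(pos / 10000 ** (two_i / P))
    alpha, beta = 1.0, 1.0
    x = np.random.rand(M, P)
    out = alpha * x + beta * pe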
    Args:
        input (Variable): 3-D input tensor with shape [N x M x P]
...
@@ -9230,6 +9284,7 @@ def add_position_encoding(input, alpha, beta, name=None):
        .. code-block:: python

            position_tensor = fluid.layers.add_position_encoding(input=tensor)
    """
    helper = LayerHelper('add_position_encoding', **locals())
    dtype = helper.input_dtype()
...
@@ -9262,13 +9317,13 @@ def bilinear_tensor_product(x,
    For example:

    .. math::
        out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1

    In this formula:
    - :math:`x`: the first input contains M elements, shape is [batch_size, M].
    - :math:`y`: the second input contains N elements, shape is [batch_size, N].
    - :math:`W_{i}`: the i-th learned weight, shape is [M, N]
    - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
    - :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`.
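In NumPy terms each output element is a bilinear form; a sketch with illustrative sizes:

.. code-block:: python

    import numpy as np

    batch, M, N, size = 2, 3, 4, 5
    x = np.random.rand(batch, M)
    y = np.random.rand(batch, N)
    W = np.random.rand(size, M, N)
    # out[:, i] = sum over m, n of x[:, m] * W[i, m, n] * y[:, n]
    out = np.einsum('bm,imn,bn->bi', x, W, y)
    print(out.shape)  # (2, 5)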
    Args:
...
python/paddle/fluid/layers/tensor.py
...
@@ -393,9 +393,6 @@ def fill_constant_batch_size_like(input,
    It also sets *stop_gradient* to True.

    Args:
        input(${input_type}): ${input_comment}.
...
@@ -411,6 +408,14 @@ def fill_constant_batch_size_like(input,

    Returns:
        ${out_comment}.

    Examples:

        .. code-block:: python

            data = fluid.layers.fill_constant_batch_size_like(
                input=like, shape=[1], value=0, dtype='int64')

    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
...
python/paddle/fluid/metrics.py
...
@@ -361,8 +361,8 @@ class ChunkEvaluator(MetricBase):
    Accumulate counter numbers output by chunk_eval from mini-batches and
    compute the precision recall and F1-score using the accumulated counter
    numbers.
    For some basics of chunking, please refer to
    `Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .
    ChunkEvalEvaluator computes the precision, recall, and F1-score of chunk detection,
    and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
...
@@ -391,6 +391,7 @@ class ChunkEvaluator(MetricBase):
    def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks):
        """
        Update the states based on the layers.chunk_eval() outputs.

        Args:
            num_infer_chunks(int|numpy.array): The number of chunks in Inference on the given minibatch.
            num_label_chunks(int|numpy.array): The number of chunks in Label on the given mini-batch.
...
@@ -450,9 +451,9 @@ class EditDistance(MetricBase):
        distance, instance_error = distance_evaluator.eval()

    In the above example:

    - 'distance' is the average of the edit distance in a pass.
    - 'instance_error' is the instance error rate in a pass.
    """
...
@@ -567,12 +568,15 @@ class DetectionMAP(object):
    Calculate the detection mean average precision (mAP).

    The general steps are as follows:

    1. calculate the true positive and false positive according to the input
       of detection and labels.
    2. calculate mAP value, support two versions: '11 point' and 'integral'.

    Please get more information from the following articles:

        https://sanchom.wordpress.com/tag/average-precision/

        https://arxiv.org/abs/1512.02325

    Args:
...
@@ -613,10 +617,12 @@ class DetectionMAP(object):
            for data in batches:
                loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch)

    In the above example:

    - 'cur_map_v' is the mAP of current mini-batch.
    - 'accum_map_v' is the accumulative mAP of one pass.
    """

    def __init__(self,
...
python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py
...
@@ -32,6 +32,8 @@ class TestConv2dFusionOp(OpTest):
        self.activation = 'relu'
        self.add_bias = True
        self.add_residual_data = True
        self.channels = None
        self.outputs = None

        self.init_group()
        self.init_dilation()
...
@@ -49,8 +51,8 @@ class TestConv2dFusionOp(OpTest):
        input = np.random.random(self.input_size).astype(self.dtype)
        filter = np.random.random(self.filter_size).astype(self.dtype)

        self.output = conv2d_forward_naive(input, filter, self.groups,
                                           conv2d_param).astype(self.dtype)

        self.inputs = {
            'Input': OpTest.np_dtype_to_fluid_dtype(input),
...
@@ -58,19 +60,20 @@ class TestConv2dFusionOp(OpTest):
        }

        if self.add_residual_data:
            residual_data = np.random.random(self.output.shape).astype(
                self.dtype)
            self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype(
                residual_data)
            self.output += residual_data

        if self.add_bias:
            bias = np.random.random(self.filter_size[0]).astype(self.dtype)
            self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias)
            self.output = self.output + bias.reshape((1, bias.size, 1, 1))

        assert self.activation in ['relu', 'identity']
        if self.activation == 'relu':
            self.output = np.maximum(self.output, 0)

        self.attrs = {
            'strides': self.stride,
...
@@ -79,9 +82,12 @@ class TestConv2dFusionOp(OpTest):
            'dilations': self.dilations,
            'data_format': self.data_format,
            'exhaustive_search': self.exhaustive_search,
            'activation': self.activation,
            'split_channels': self.channels
        }

        self.outputs = {'Output': self.output}

        self.set_outputs()

    def testcuda(self):
        return core.is_compiled_with_cuda()
...
@@ -117,6 +123,9 @@ class TestConv2dFusionOp(OpTest):
    def set_search_method(self):
        self.exhaustive_search = False

    def set_outputs(self):
        pass


class TestWithoutResidual(TestConv2dFusionOp):
    def init_bias_residual(self):
...
@@ -160,5 +169,21 @@ class TestCUDNNExhaustiveSearch(TestConv2dFusionOp):
        self.exhaustive_search = True


class TestMultipleOutputs(TestConv2dFusionOp):
    def init_test_case(self):
        self.pad = [1, 1]
        self.stride = [1, 1]
        self.input_size = [1, 32, 17, 17]  # NCHW
        assert np.mod(self.input_size[1], self.groups) == 0
        f_c = self.input_size[1] // self.groups
        self.filter_size = [126, f_c, 3, 3]
        self.channels = [84, 42]

    def set_outputs(self):
        out1 = self.output[:, 0:84, :, :]
        out2 = self.output[:, 84:126, :, :]
        self.outputs['Outputs'] = [('out1', out1), ('out2', out2)]


if __name__ == '__main__':
    unittest.main()
python/paddle/fluid/tests/unittests/test_layers.py
...
@@ -243,6 +243,10 @@ class TestBook(unittest.TestCase):
        pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True)
        self.assertIsNotNone(pool)
        self.assertIsNotNone(mask)
        self.assertIsNotNone(layers.adaptive_pool2d(x, 3, pool_type='avg'))
        pool, mask = layers.adaptive_pool2d(x, 3, require_index=True)
        self.assertIsNotNone(pool)
        self.assertIsNotNone(mask)

    def test_adaptive_pool3d(self):
        program = Program()
...
@@ -255,6 +259,10 @@ class TestBook(unittest.TestCase):
            x, [3, 3, 3], require_index=True)
        self.assertIsNotNone(pool)
        self.assertIsNotNone(mask)
        self.assertIsNotNone(layers.adaptive_pool3d(x, 3, pool_type='avg'))
        pool, mask = layers.adaptive_pool3d(x, 3, require_index=True)
        self.assertIsNotNone(pool)
        self.assertIsNotNone(mask)

    def test_lstm_unit(self):
        program = Program()
...
python/paddle/fluid/tests/unittests/testsuite.py
...
@@ -137,9 +137,9 @@ def append_input_output(block, op_proto, np_list, is_input, dtype):
    var_dict = {}
    for var_proto in proto_list:
        var_name = str(var_proto.name)
        if is_input:
            if (var_name not in np_list) and var_proto.dispensable:
                continue
            assert (var_name in np_list) or (var_proto.dispensable), \
                "Missing {} as input".format(var_name)
        if var_proto.duplicable:
...
python/paddle/fluid/transpiler/distribute_transpiler.py
...
@@ -125,14 +125,23 @@ def slice_variable(var_list, slice_count, min_block_size):
class DistributeTranspilerConfig(object):
    """
    .. py:attribute:: slice_var_up (bool)

          Do Tensor slice for pservers, default is True.

    .. py:attribute:: split_method (PSDispatcher)

          RoundRobin or HashName can be used.
          Try to choose the best method to balance loads for pservers.

    .. py:attribute:: min_block_size (int)

          Minimum number of splitted elements in block.

          According to: https://github.com/PaddlePaddle/Paddle/issues/8638#issuecomment-369912156
          We can use bandwidth efficiently when data size is larger than 2MB. If you
          want to change it, please be sure you have read the slice_variable function.

    """
    slice_var_up = True
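A minimal configuration sketch using these attributes (illustrative; assumes the transpiler is used as elsewhere in fluid):

.. code-block:: python

    import paddle.fluid as fluid

    config = fluid.DistributeTranspilerConfig()
    config.slice_var_up = False  # keep each variable whole on a single pserver
    t = fluid.DistributeTranspiler(config=config)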
...