Commit 3a68955f

Authored on Oct 09, 2017 by X.Dragon
Committed by GitHub on Oct 09, 2017
Merge pull request #4509 from hedaoyuan/inference
Build mobile inference library for minimum size
Parents: 3f874143, aa1e92ec
Showing 23 changed files with 192 additions and 101 deletions (+192, -101)
CMakeLists.txt                                           +13  -3
cmake/util.cmake                                         +37  -19
paddle/CMakeLists.txt                                    +23  -18
paddle/capi/CMakeLists.txt                               +1   -3
paddle/capi/tests/CMakeLists.txt                         +9   -8
paddle/gserver/CMakeLists.txt                            +30  -0
paddle/gserver/gradientmachines/GradientMachine.cpp      +11  -2
paddle/gserver/gradientmachines/GradientMachine.h        +6   -1
paddle/gserver/gradientmachines/NeuralNetwork.cpp        +14  -4
paddle/gserver/gradientmachines/NeuralNetwork.h          +3   -0
paddle/gserver/layers/Layer.cpp                          +6   -1
paddle/gserver/tests/CMakeLists.txt                      +39  -31
paddle/gserver/tests/LayerGradUtil.h                     +0   -1
paddle/gserver/tests/test_ActivationGrad.cpp             +0   -1
paddle/gserver/tests/test_BatchNorm.cpp                  +0   -1
paddle/gserver/tests/test_CRFLayerGrad.cpp               +0   -1
paddle/gserver/tests/test_ConvTrans.cpp                  +0   -1
paddle/gserver/tests/test_ConvUnify.cpp                  +0   -1
paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp   +0   -1
paddle/gserver/tests/test_KmaxSeqScore.cpp               +0   -1
paddle/gserver/tests/test_LayerGrad.cpp                  +0   -1
paddle/gserver/tests/test_SelectiveFCLayer.cpp           +0   -1
paddle/gserver/tests/test_SeqSliceLayerGrad.cpp          +0   -1
CMakeLists.txt (+13, -3)

@@ -86,6 +86,14 @@ if(ANDROID OR IOS)

        "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
    set(WITH_MKLML OFF CACHE STRING
        "Disable MKLML package when cross-compiling for Android and iOS" FORCE)

    # Compile PaddlePaddle mobile inference library
    if(NOT WITH_C_API)
      set(WITH_C_API ON CACHE STRING
          "Always compile the C_API when cross-compiling for Android and iOS" FORCE)
    endif()
    set(MOBILE_INFERENCE ON)
    add_definitions(-DPADDLE_MOBILE_INFERENCE)
endif()

set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING

@@ -160,9 +168,11 @@ endif(USE_NNPACK)

add_subdirectory(proto)

if(NOT MOBILE_INFERENCE)
  # "add_subdirectory(go)" should be placed after the following loine,
  # because it depends on paddle/optimizer.
  add_subdirectory(paddle/optimizer)
endif()

# "add_subdirectory(paddle)" and "add_subdirectory(python)" should be
# placed after this block, because they depends on it.
cmake/util.cmake (+37, -19)

@@ -73,25 +73,43 @@ function(link_paddle_exe TARGET_NAME)

    generate_rdma_links()
  endif()

  if(MOBILE_INFERENCE)
    target_circle_link_libraries(${TARGET_NAME}
      ARCHIVE_START
      paddle_gserver
      paddle_function
      ARCHIVE_END
      paddle_math
      paddle_utils
      paddle_parameter
      paddle_proto
      paddle_cuda
      ${EXTERNAL_LIBS}
      ${CMAKE_THREAD_LIBS_INIT}
      ${CMAKE_DL_LIBS}
      ${RDMA_LD_FLAGS}
      ${RDMA_LIBS})
  else()
    target_circle_link_libraries(${TARGET_NAME}
      ARCHIVE_START
      paddle_gserver
      paddle_function
      ARCHIVE_END
      paddle_pserver
      paddle_trainer_lib
      paddle_network
      paddle_math
      paddle_utils
      paddle_parameter
      paddle_proto
      paddle_cuda
      paddle_optimizer
      ${EXTERNAL_LIBS}
      ${CMAKE_THREAD_LIBS_INIT}
      ${CMAKE_DL_LIBS}
      ${RDMA_LD_FLAGS}
      ${RDMA_LIBS})
  endif()

  if(ANDROID)
    target_link_libraries(${TARGET_NAME} log)
paddle/CMakeLists.txt (+23, -18)

add_subdirectory(cuda)
add_subdirectory(function)
add_subdirectory(utils)
add_subdirectory(testing)
add_subdirectory(math)
add_subdirectory(gserver)
add_subdirectory(parameter)

if(MOBILE_INFERENCE)
  add_subdirectory(capi)
else()
  add_subdirectory(pserver)
  add_subdirectory(trainer)
  add_subdirectory(string)
  add_subdirectory(scripts)

  if(WITH_C_API)
    add_subdirectory(capi)
  endif()

  if(Boost_FOUND)
    add_subdirectory(memory)
    add_subdirectory(platform)
    add_subdirectory(framework)
    add_subdirectory(operators)
    add_subdirectory(pybind)
  endif()

  if(WITH_SWIG_PY)
    add_subdirectory(api)
  endif()
endif()

Removed: the previous flat layout, in which pserver, trainer, testing, scripts and string were added unconditionally and the Boost_FOUND, WITH_C_API and WITH_SWIG_PY blocks sat at the top level.
paddle/capi/CMakeLists.txt (+1, -3)

@@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS

    paddle_cuda
    paddle_function
    paddle_gserver
    paddle_proto)

cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})

Removed from PADDLE_CAPI_INFER_LIBS: paddle_pserver, paddle_network.
paddle/capi/tests/CMakeLists.txt (+9, -8)

@@ -4,11 +4,12 @@ add_unittest(capi_test_mats test_Vector.cpp

target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH})
target_link_libraries(capi_test_mats paddle_capi)

if(NOT MOBILE_INFERENCE)
  add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp)
  target_include_directories(capi_test_gradientMachine PUBLIC
                             ${PADDLE_CAPI_INC_PATH})
  target_link_libraries(capi_test_gradientMachine paddle_capi)
  add_test(NAME capi_test_gradientMachine
           COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d
                   ${PADDLE_SOURCE_DIR}/python
                   ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine
           WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests)
endif()
paddle/gserver/CMakeLists.txt (+30, -0)

@@ -60,6 +60,36 @@ if(NOT WITH_PYTHON)

        dataproviders/PyDataProvider.h)
endif()

if(MOBILE_INFERENCE)
  # Remove evaluators
  list(REMOVE_ITEM GSERVER_SOURCES
       layers/ValidationLayer.cpp
       evaluators/Evaluator.cpp
       evaluators/DetectionMAPEvaluator.cpp
       evaluators/CTCErrorEvaluator.cpp
       evaluators/ChunkEvaluator.cpp)

  # Remove dataproviders
  list(REMOVE_ITEM GSERVER_SOURCES
       dataproviders/DataProvider.cpp
       dataproviders/MultiDataProvider.cpp
       dataproviders/ProtoDataProvider.cpp
       dataproviders/PyDataProvider2.cpp
       dataproviders/PyDataProvider.cpp)

  # Remove useless gradientmachines
  list(REMOVE_ITEM GSERVER_SOURCES
       gradientmachines/MultiNetwork.cpp
       gradientmachines/RecurrentGradientMachine.cpp
       gradientmachines/ParallelNeuralNetwork.cpp
       gradientmachines/GradientMachineMode.cpp
       gradientmachines/MultiGradientMachine.cpp)

  # Remove useless layers
  list(REMOVE_ITEM GSERVER_SOURCES
       layers/RecurrentLayerGroup.cpp)
endif()

if(WITH_GPU)
  cuda_add_library(paddle_gserver ${GSERVER_SOURCES})
else()
paddle/gserver/gradientmachines/GradientMachine.cpp (+11, -2)

@@ -17,12 +17,15 @@ limitations under the License. */

#include <fstream>
#include "paddle/utils/Logging.h"

#include "NeuralNetwork.h"
#include "hl_gpu.h"

#ifndef PADDLE_MOBILE_INFERENCE
#include "GradientMachineMode.h"
#include "MultiGradientMachine.h"
#include "MultiNetwork.h"
#include "ParallelNeuralNetwork.h"
#endif

namespace paddle {

@@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create(

    const ModelConfig& config,
    int mode,
    const std::vector<ParameterType>& parameterTypes) {
#ifndef PADDLE_MOBILE_INFERENCE
  if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) {
    return gm;
  }
  if (FLAGS_trainer_count > 1) {
    return new MultiGradientMachine(config, FLAGS_use_gpu);
  }
#endif
  if (FLAGS_trainer_count == 1) {  // single
#ifndef PADDLE_MOBILE_INFERENCE
    NeuralNetwork* nn;
    if (config.type() == "multi_nn") {
      /* multi submodel calculate, thread(s) will be initialized inside */

@@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create(

      /* single thread calculate */
      nn = NeuralNetwork::create(config);
    }
#else
    NeuralNetwork* nn = NeuralNetwork::create(config);
#endif
    ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) {
      para->enableType(PARAMETER_VALUE);
    };
paddle/gserver/gradientmachines/GradientMachine.h (+6, -1)

@@ -20,13 +20,16 @@ limitations under the License. */

#include "ModelConfig.pb.h"
#include "TrainerConfig.pb.h"
#include "paddle/gserver/dataproviders/DataProvider.h"
#include "paddle/gserver/layers/Layer.h"
#include "paddle/math/Matrix.h"
#include "paddle/parameter/Parameter.h"
#include "paddle/parameter/ParameterUpdaterBase.h"
#include "paddle/utils/Thread.h"

#ifndef PADDLE_MOBILE_INFERENCE
#include "paddle/gserver/evaluators/Evaluator.h"
#endif

namespace paddle {

/**
 * @brief A gradient machine is capable of calculating some outputs given

@@ -147,6 +150,7 @@ public:

  virtual void onPassEnd() = 0;

#ifndef PADDLE_MOBILE_INFERENCE
  /**
   * Create an evaluator which can be used for eval()
   */

@@ -156,6 +160,7 @@ public:

   * evaluate using the given evaluator
   */
  virtual void eval(Evaluator* evaluator) const = 0;
#endif

  std::vector<ParameterPtr>& getParameters() { return parameters_; }
paddle/gserver/gradientmachines/NeuralNetwork.cpp (+14, -4)

@@ -14,15 +14,17 @@ limitations under the License. */

#include "paddle/utils/Util.h"

#include "NeuralNetwork.h"
#include "hl_gpu.h"
#include "paddle/gserver/layers/AgentLayer.h"
#include "paddle/utils/CustomStackTrace.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"

#ifndef PADDLE_MOBILE_INFERENCE
#include "MultiNetwork.h"
#include "RecurrentGradientMachine.h"
#endif

namespace paddle {

void parameterInitNN(int paramId,

@@ -54,6 +56,7 @@ void parameterInitNN(int paramId,

}

NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
#ifndef PADDLE_MOBILE_INFERENCE
  if (config.type() == "recurrent_nn") {
    return newNeuralNetwork("root");
  } else if (config.type() == "multi_nn") {

@@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {

  } else {
    return newNeuralNetwork();
  }
#else
  return new NeuralNetwork();
#endif
}

std::map<std::string, bool> NeuralNetwork::dllInitMap;

@@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() {

  }
}

#ifndef PADDLE_MOBILE_INFERENCE
class CombinedEvaluator : public Evaluator {
public:
  void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {

@@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const {

void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); }
#endif

void NeuralNetwork::setOutputGrad(const std::vector<Argument>& args) {
  CHECK_GE(outputLayers_.size(), args.size());
  for (size_t i = 0; i < args.size(); ++i) {
paddle/gserver/gradientmachines/NeuralNetwork.h (+3, -0)

@@ -97,9 +97,12 @@ public:

  virtual void onPassEnd();

#ifndef PADDLE_MOBILE_INFERENCE
  virtual Evaluator* makeEvaluator() const;

  virtual void eval(Evaluator* evaluator) const;
#endif

  virtual void resetState();
  virtual void setOutputGrad(const std::vector<Argument>& args);
paddle/gserver/layers/Layer.cpp (+6, -1)

@@ -15,11 +15,14 @@ limitations under the License. */

#include "paddle/utils/Util.h"

#include "CostLayer.h"
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/Error.h"
#include "paddle/utils/Logging.h"

#ifndef PADDLE_MOBILE_INFERENCE
#include "ValidationLayer.h"
#endif

DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");

namespace paddle {

@@ -103,10 +106,12 @@ LayerPtr Layer::create(const LayerConfig& config) {

    return LayerPtr(new MultiClassCrossEntropy(config));
  else if (type == "rank-cost")
    return LayerPtr(new RankingCost(config));
#ifndef PADDLE_MOBILE_INFERENCE
  else if (type == "auc-validation")
    return LayerPtr(new AucValidation(config));
  else if (type == "pnpair-validation")
    return LayerPtr(new PnpairValidation(config));
#endif

  return LayerPtr(registrar_.createByType(config.type(), config));
}
paddle/gserver/tests/CMakeLists.txt (+39, -31)

# gserver pacakge unittests

if(NOT MOBILE_INFERENCE)
################### test_ProtoDataProvider ############
  add_unittest_without_exec(test_ProtoDataProvider
    test_ProtoDataProvider.cpp)

  # test_ProtoDataProvider will mkdir as same name,
  # so if WORKING_DIRECTORY is default directory, then
  # mkdir will get error.
  add_test(NAME test_ProtoDataProvider
    COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
endif()

################# test_LayerGrad #######################
add_unittest_without_exec(test_LayerGrad

@@ -98,9 +100,11 @@ add_unittest_without_exec(test_KmaxSeqScore

add_test(NAME test_KmaxSeqScore
         COMMAND test_KmaxSeqScore)

if(NOT MOBILE_INFERENCE)
################## test_Evaluator #######################
  add_unittest(test_Evaluator
    test_Evaluator.cpp)
endif()

################ test_LinearChainCRF ####################
add_simple_unittest(test_LinearChainCRF)

@@ -131,27 +135,31 @@ if(NOT WITH_DOUBLE)

    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
endif()

if(NOT MOBILE_INFERENCE)
############### test_RecurrentGradientMachine ###############
  # TODO(yuyang18): There is some bug in test_RecurrentGradientMachine
  # I will fix it.
  add_unittest_without_exec(test_RecurrentGradientMachine
    test_RecurrentGradientMachine.cpp)
  add_test(NAME test_RecurrentGradientMachine
    COMMAND .set_python_path.sh -d
            ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
            ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
endif()

if(NOT MOBILE_INFERENCE)
  add_unittest_without_exec(test_NetworkCompare
    test_NetworkCompare.cpp)
  if(WITH_GPU)
    add_test(NAME test_NetworkCompare
      COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true
      WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
  else()
    add_test(NAME test_NetworkCompare
      COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false
      WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
  endif()
endif()
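Any further test that pulls in training-only code would be gated the same way. A minimal hypothetical sketch of that pattern follows; the test name and source file are illustrative only and are not part of this commit.

# Hypothetical example of the if(NOT MOBILE_INFERENCE) gating used above:
# tests that depend on trainer/pserver code are skipped entirely when the
# mobile inference library is being built.
if(NOT MOBILE_INFERENCE)
  add_unittest_without_exec(test_MyTrainerOnlyFeature
      test_MyTrainerOnlyFeature.cpp)
  add_test(NAME test_MyTrainerOnlyFeature
      COMMAND test_MyTrainerOnlyFeature)
endif()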
paddle/gserver/tests/LayerGradUtil.h (+0, -1)

@@ -15,7 +15,6 @@ limitations under the License. */

#pragma once
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/testing/TestUtil.h"

using namespace std;  // NOLINT

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_ActivationGrad.cpp (+0, -1)

@@ -17,7 +17,6 @@ limitations under the License. */

#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_BatchNorm.cpp (+0, -1)

@@ -17,7 +17,6 @@ limitations under the License. */

#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_CRFLayerGrad.cpp (+0, -1)

@@ -16,7 +16,6 @@ limitations under the License. */

#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/gserver/layers/LinearChainCRF.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_ConvTrans.cpp (+0, -1)

@@ -18,7 +18,6 @@ limitations under the License. */

#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/math/MathUtils.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_ConvUnify.cpp (+0, -1)

@@ -18,7 +18,6 @@ limitations under the License. */

#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/math/MathUtils.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp (+0, -1)

@@ -18,7 +18,6 @@ limitations under the License. */

#include <gtest/gtest.h>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_KmaxSeqScore.cpp (+0, -1)

@@ -18,7 +18,6 @@ limitations under the License. */

#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_LayerGrad.cpp (+0, -1)

@@ -21,7 +21,6 @@ limitations under the License. */

#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/math/MathUtils.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_SelectiveFCLayer.cpp (+0, -1)

@@ -24,7 +24,6 @@ limitations under the License. */

#include "paddle/gserver/layers/Layer.h"
#include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h"
#include "paddle/math/CpuSparseMatrix.h"

using namespace paddle;  // NOLINT
using namespace std;     // NOLINT

Removed: #include "paddle/trainer/Trainer.h"
paddle/gserver/tests/test_SeqSliceLayerGrad.cpp (+0, -1)

@@ -15,7 +15,6 @@ limitations under the License. */

#include <gtest/gtest.h>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

Removed: #include "paddle/trainer/Trainer.h"