Commit 635b8672
Authored on Aug 17, 2017 by tensor-tang

    Merge remote-tracking branch 'upstream/develop' into remove-flag

Parents: e08651f9, 9806e7f2

Showing 26 changed files with 423 additions and 257 deletions (+423 / -257).
Changed files:

- Dockerfile (+0 / -14)
- cmake/flags.cmake (+1 / -8)
- doc/design/mkldnn/README.MD (+1 / -0)
- doc/getstarted/build_and_install/build_from_source_en.md (+7 / -6)
- paddle/capi/gradient_machine.cpp (+16 / -0)
- paddle/capi/gradient_machine.h (+17 / -1)
- paddle/framework/op_registry.cc (+45 / -1)
- paddle/framework/op_registry.h (+21 / -137)
- paddle/framework/operator.cc (+38 / -0)
- paddle/framework/operator.h (+93 / -5)
- paddle/framework/operator_test.cc (+18 / -0)
- paddle/memory/CMakeLists.txt (+1 / -1)
- paddle/memory/memcpy.cc (+0 / -2)
- paddle/operators/net_op.cc (+8 / -1)
- paddle/operators/net_op.h (+14 / -0)
- paddle/operators/net_op_test.cc (+17 / -0)
- paddle/operators/recurrent_op.h (+18 / -4)
- paddle/platform/CMakeLists.txt (+4 / -1)
- paddle/platform/device_context.cc (+65 / -14)
- paddle/platform/device_context.h (+9 / -4)
- paddle/platform/device_context_test.cc (+1 / -0)
- paddle/platform/enforce.h (+1 / -1)
- paddle/scripts/docker/build.sh (+3 / -23)
- paddle/scripts/submit_local.sh.in (+5 / -24)
- python/paddle/v2/trainer.py (+11 / -3)
- python/setup.py.in (+9 / -7)
Dockerfile (+0 / -14)

```diff
@@ -71,20 +71,6 @@ RUN pip install -r /root/requirements.txt
 RUN apt-get install -y libssl-dev libffi-dev
 RUN pip install certifi urllib3[secure]
 
-# TODO(qijun) The template library Eigen doesn't work well with GCC 5
-# coming with the default Docker image, so we switch to use GCC 4.8
-# by default. And I will check Eigen library later.
-RUN ln -sf gcc-4.8 /usr/bin/gcc && \
-    ln -sf gcc-ar-4.8 /usr/bin/gcc-ar && \
-    ln -sf gcc-nm-4.8 /usr/bin/gcc-nm && \
-    ln -sf gcc-ranlib-4.8 /usr/bin/gcc-ranlib && \
-    ln -sf gcc-4.8 /usr/bin/x86_64-linux-gnu-gcc && \
-    ln -sf gcc-ar-4.8 /usr/bin/x86_64-linux-gnu-gcc-ar && \
-    ln -sf gcc-nm-4.8 /usr/bin/x86_64-linux-gnu-gcc-nm && \
-    ln -sf gcc-ranlib-4.8 /usr/bin/x86_64-linux-gnu-gcc-ranlib && \
-    ln -sf g++-4.8 /usr/bin/g++ && \
-    ln -sf g++-4.8 /usr/bin/x86_64-linux-gnu-g++
-
 # Install woboq_codebrowser to /woboq
 RUN git clone https://github.com/woboq/woboq_codebrowser /woboq && \
```
cmake/flags.cmake (+1 / -8)

```diff
@@ -9,13 +9,6 @@ function(CheckCompilerCXX11Flag)
         if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8)
             message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.")
         endif()
-        if(NOT ANDROID)
-            # TODO(qijun) gcc 4.9 or later versions raise SEGV due to the optimization problem.
-            # Use Debug mode instead for now.
-            if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9)
-                set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "" FORCE)
-            endif()
-        endif()
     elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
         # cmake >= 3.0 compiler id "AppleClang" on Mac OS X, otherwise "Clang"
         # Apple Clang is a different compiler than upstream Clang which havs different version numbers.
```

```diff
@@ -160,7 +153,7 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
 # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
 # So, don't set these flags here.
-LIST(APPEND CUDA_NVCC_FLAGS -std=c++11 --default-stream per-thread)
+LIST(APPEND CUDA_NVCC_FLAGS -std=c++11)
 LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math)
 if(CMAKE_BUILD_TYPE STREQUAL "Debug")
```
doc/design/mkldnn/README.MD (+1 / -0)

```diff
@@ -101,6 +101,7 @@ if use_mkldnn
 5. Add two `MkldnnMatrixPtr` members, named `mkldnnValue` and `mkldnnGrad`, to **Argument** to hold the memory buffers used by `MkldnnLayer`, and add a cvt function (to be renamed to something more appropriate later) that converts memory between the "CPU device" and the "MKL-DNN device".
 6. In the `getOutput` function of the parent class `Layer`, add logic that checks the `deviceId` and, when the device differs between MKL-DNN and CPU, performs an up-front conversion, i.e. calls `Argument`'s cvt function to bring the output onto the required device.
 7. Add a `use_mkldnn` flag to the existing `FLAGS` to choose whether the MKL-DNN functionality is used.
+8. On saving MKLDNN parameters: since the MKLDNN parameter format can differ from PaddlePaddle's original one, the format information needs to be saved together with the parameters. The plan is to extend the `int32_t version` field of the [Header](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/parameter/Parameter.h#L247). This value has always been 0 in both v1 and v2, so it can be repurposed: define an enum covering all MKLDNN parameter formats, so that `MKLDNNLayer` can obtain the required format information from the loaded parameters.
 
 ## References
```
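Step 8 above only describes the versioning idea in prose. A minimal sketch of how a repurposed `int32_t version` header field could encode parameter formats follows; the enum name and values are hypothetical illustrations, not the design adopted by this doc:

```cpp
#include <cstdint>

// Hypothetical encoding of parameter formats in the header's `version` field.
// Legacy v1/v2 parameter files always stored 0, so nonzero values are free
// to carry MKL-DNN layout information.
enum ParameterHeaderFormat : int32_t {
  kOriginalFormat = 0,  // legacy PaddlePaddle layout
  kMKLDNNFormatA = 1,   // example MKL-DNN weight layout
  kMKLDNNFormatB = 2,   // another example layout
};

// On load, a layer could decide whether a conversion pass is needed:
inline bool NeedsConversion(int32_t header_version) {
  return header_version != kOriginalFormat;
}
```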
doc/getstarted/build_and_install/build_from_source_en.md (+7 / -6)

````diff
@@ -68,7 +68,7 @@ As a simple example, consider the following:
 
 1. **BLAS Dependencies(optional)**
 
-   CMake will search BLAS libraries from system. If not found, OpenBLAS will be downloaded, built and installed automatically.
+   CMake will search BLAS libraries from the system. If not found, OpenBLAS will be downloaded, built and installed automatically.
    To utilize preinstalled BLAS, you can simply specify MKL, OpenBLAS or ATLAS via `MKL_ROOT`, `OPENBLAS_ROOT` or `ATLAS_ROOT`.
 
    ```bash
````

````diff
@@ -131,9 +131,9 @@ As a simple example, consider the following:
 To build GPU version, you will need the following installed:
 
     1. a CUDA-capable GPU
-    2. A supported version of Linux with a gcc compiler and toolchain
+    2. A supported version of Linux with a GCC compiler and toolchain
     3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
-    4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn)
+    4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn)
 
 The CUDA development environment relies on tight integration with the host development environment,
 including the host compiler and C runtime libraries, and is therefore only supported on
````

````diff
@@ -172,6 +172,7 @@ export PATH=<path to install>/bin:$PATH
 # install PaddlePaddle Python modules.
 sudo pip install <path to install>/opt/paddle/share/wheels/*.whl
 ```
+
 ## <span id="centos">Build on Centos 7</span>
 
 ### Install Dependencies
````

````diff
@@ -192,9 +193,9 @@ sudo pip install <path to install>/opt/paddle/share/wheels/*.whl
 To build GPU version, you will need the following installed:
 
     1. a CUDA-capable GPU
-    2. A supported version of Linux with a gcc compiler and toolchain
+    2. A supported version of Linux with a GCC compiler and toolchain
     3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
-    4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn)
+    4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn)
 
 The CUDA development environment relies on tight integration with the host development environment,
 including the host compiler and C runtime libraries, and is therefore only supported on
````
paddle/capi/gradient_machine.cpp (+16 / -0)

```diff
@@ -146,3 +146,19 @@ paddle_error paddle_gradient_machine_randomize_param(
   m->machine->randParameters();
   return kPD_NO_ERROR;
 }
+
+paddle_error paddle_gradient_machine_get_layer_output(
+    paddle_gradient_machine machine, const char* layerName,
+    paddle_arguments args) {
+  auto m = cast(machine);
+  auto out = paddle::capi::cast<paddle::capi::CArguments>(args);
+  if (m == nullptr || layerName == nullptr || out == nullptr ||
+      m->machine == nullptr) {
+    return kPD_NULLPTR;
+  }
+
+  auto layerOutput = m->machine->getLayerOutput(layerName);
+  out->args.push_back(layerOutput);
+
+  return kPD_NO_ERROR;
+}
```
paddle/capi/gradient_machine.h (+17 / -1)

```diff
@@ -39,7 +39,11 @@ PD_API paddle_error paddle_gradient_machine_create_for_inference(
 /**
  * @brief Create a gradient machine used for model inference, using config with
  *        parameters which is generated by `paddle merge_model`.
- * @param [out] machine that used for model inference.
+ *        Example:
+ *          paddle merge_model \
+ *            --model_dir="pass-00000" \
+ *            --model_file="merged_model.paddle"
+ * @param [out] machine that used for model inference
  * @param [in] mergedModel
  * @param [in] size
  * @return paddle_error
```

```diff
@@ -97,6 +101,18 @@ paddle_gradient_machine_randomize_param(paddle_gradient_machine machine);
 PD_API paddle_error
 paddle_gradient_machine_destroy(paddle_gradient_machine machine);
 
+/**
+ * @brief Get the output of the layer named `layerName`.
+ * @param [in] gradient machine that have run a inference
+ * @param [in] layerName name of specified layer
+ * @param [out] args output of the specified layer
+ * @return paddle_error
+ */
+PD_API paddle_error
+paddle_gradient_machine_get_layer_output(paddle_gradient_machine machine,
+                                         const char* layerName,
+                                         paddle_arguments args);
+
 #ifdef __cplusplus
 }
 #endif
```
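A hedged usage sketch for the new C-API call: it assumes `machine` was already created and has run a forward pass, and the layer name `"__fc_layer_0__"` is a hypothetical example, not a name guaranteed by this commit. `paddle_arguments_create_none` is the existing C-API constructor for an empty arguments object.

```cpp
// Sketch only: setup and forward-pass calls are elided.
paddle_arguments layer_out = paddle_arguments_create_none();
paddle_error err = paddle_gradient_machine_get_layer_output(
    machine, "__fc_layer_0__", layer_out);  // hypothetical layer name
if (err != kPD_NO_ERROR) {
  // e.g. kPD_NULLPTR if machine, layerName, or args is null
}
```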
paddle/framework/op_registry.cc (+45 / -1)

```diff
@@ -17,5 +17,49 @@ limitations under the License. */
 #include <vector>
 
 namespace paddle {
-namespace framework {}  // namespace framework
+namespace framework {
+
+std::shared_ptr<OperatorBase> OpRegistry::CreateOp(const std::string& type,
+                                                   const VarNameMap& inputs,
+                                                   const VarNameMap& outputs,
+                                                   AttributeMap attrs) {
+  auto it = op_info_map().find(type);
+  PADDLE_ENFORCE(it != op_info_map().end(),
+                 "Operator '%s' has not been registered.", type);
+  it->second.checker_->Check(attrs);
+  auto op = it->second.creator_(type, inputs, outputs, attrs);
+  return std::shared_ptr<OperatorBase>(op);
+}
+
+std::shared_ptr<OperatorBase> OpRegistry::CreateOp(const OpDesc& op_desc) {
+  VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs());
+  VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs());
+  AttributeMap attrs;
+  for (auto& attr : op_desc.attrs()) {
+    attrs[attr.name()] = GetAttrValue(attr);
+  }
+
+  return CreateOp(op_desc.type(), inputs, outputs, attrs);
+}
+
+OperatorBase::VarNameMap OpRegistry::ConvertOpDescVarsToVarNameMap(
+    const google::protobuf::RepeatedPtrField<OpDesc::Var>& op_desc_vars) {
+  VarNameMap ret_val;
+  for (auto& var : op_desc_vars) {
+    auto& var_names = ret_val[var.parameter()];
+    auto& var_names_in_proto = var.arguments();
+    var_names.reserve(static_cast<size_t>(var_names_in_proto.size()));
+    std::copy(var_names_in_proto.begin(), var_names_in_proto.end(),
+              std::back_inserter(var_names));
+  }
+  return ret_val;
+}
+
+std::shared_ptr<OperatorBase> OpRegistry::CreateGradOp(const OperatorBase& op) {
+  PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops");
+  std::shared_ptr<OperatorBase> grad_op(BuildGradOp(&op));
+  return grad_op;
+}
+
+}  // namespace framework
 }  // namespace paddle
```
paddle/framework/op_registry.h (+21 / -137)

```diff
@@ -29,103 +29,6 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-// this class not only make proto but also init attribute checkers.
-class OpProtoAndCheckerMaker {
- public:
-  OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : proto_(proto), op_checker_(op_checker) {}
-
-  ~OpProtoAndCheckerMaker() {
-    PADDLE_ENFORCE(validated_, "should call Validate after build");
-  }
-
-  void Validate() {
-    validated_ = true;
-    CheckNoDuplicatedInOutAttrs();
-  }
-
- protected:
-  struct VariableBuilder {
-    OpProto::Var* var_;
-
-    VariableBuilder& AsDuplicable() {
-      var_->set_duplicable(true);
-      return *this;
-    }
-
-    VariableBuilder& AsIntermediate() {
-      var_->set_intermediate(true);
-      return *this;
-    }
-
-    // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it
-    // means that input/output is not needed when calculate gradient. It does
-    // not mean no gradient when backward. It should be changed soon.
-    VariableBuilder& AsNoGradient() {
-      var_->set_no_gradient(true);
-      return *this;
-    }
-  };
-
-  VariableBuilder AddInput(const std::string& name,
-                           const std::string& comment) {
-    auto* input = proto_->add_inputs();
-    input->set_name(name);
-    input->set_comment(comment);
-    return VariableBuilder{input};
-  }
-
-  VariableBuilder AddOutput(const std::string& name,
-                            const std::string& comment) {
-    auto* output = proto_->add_outputs();
-    output->set_name(name);
-    output->set_comment(comment);
-    return VariableBuilder{output};
-  }
-
-  template <typename T>
-  TypedAttrChecker<T>& AddAttr(const std::string& name,
-                               const std::string& comment,
-                               bool generated = false) {
-    auto* attr = proto_->add_attrs();
-    attr->set_name(name);
-    attr->set_comment(comment);
-    attr->set_generated(generated);
-    attr->set_type(AttrTypeID<T>());
-    return op_checker_->AddAttrChecker<T>(name);
-  }
-
-  void AddComment(const std::string& comment) { proto_->set_comment(comment); }
-
- private:
-  void CheckNoDuplicatedInOutAttrs() {
-    std::unordered_set<std::string> names;
-    auto checker = [&](const std::string& name) {
-      PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name);
-      names.insert(name);
-    };
-    for (auto& attr : proto_->attrs()) {
-      checker(attr.name());
-    }
-    for (auto& input : proto_->inputs()) {
-      checker(input.name());
-    }
-    for (auto& output : proto_->outputs()) {
-      checker(output.name());
-    }
-  }
-
-  OpProto* proto_;
-  OpAttrChecker* op_checker_;
-  bool validated_{false};
-};
-
-class NOPMaker : public OpProtoAndCheckerMaker {
- public:
-  NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {}
-};
-
 class OpRegistry {
   using VarNameMap = OperatorBase::VarNameMap;
   using OpCreator = std::function<OperatorBase* (
```

```diff
@@ -177,45 +80,14 @@ class OpRegistry {
   static std::shared_ptr<OperatorBase> CreateOp(const std::string& type,
                                                 const VarNameMap& inputs,
                                                 const VarNameMap& outputs,
-                                                AttributeMap attrs) {
-    auto it = op_info_map().find(type);
-    PADDLE_ENFORCE(it != op_info_map().end(),
-                   "Operator '%s' has not been registered.", type);
-    it->second.checker_->Check(attrs);
-    auto op = it->second.creator_(type, inputs, outputs, attrs);
-    return std::shared_ptr<OperatorBase>(op);
-  }
-
-  static VarNameMap ConvertOpDescVarsToVarNameMap(
-      const google::protobuf::RepeatedPtrField<OpDesc::Var>& op_desc_vars) {
-    VarNameMap ret_val;
-    for (auto& var : op_desc_vars) {
-      auto& var_names = ret_val[var.parameter()];
-      auto& var_names_in_proto = var.arguments();
-      var_names.reserve(static_cast<size_t>(var_names_in_proto.size()));
-      std::copy(var_names_in_proto.begin(), var_names_in_proto.end(),
-                std::back_inserter(var_names));
-    }
-    return ret_val;
-  }
-
-  static std::shared_ptr<OperatorBase> CreateOp(const OpDesc& op_desc) {
-    VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs());
-    VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs());
-    AttributeMap attrs;
-    for (auto& attr : op_desc.attrs()) {
-      attrs[attr.name()] = GetAttrValue(attr);
-    }
-
-    return CreateOp(op_desc.type(), inputs, outputs, attrs);
-  }
+                                                AttributeMap attrs);
 
-  static std::shared_ptr<OperatorBase> CreateGradOp(const OperatorBase& op) {
-    PADDLE_ENFORCE(!op.IsNetOp(),
-                   "Use framework::Backward to get backward ops");
-    std::shared_ptr<OperatorBase> grad_op(BuildGradOp(&op));
-    return grad_op;
-  }
+  static std::shared_ptr<OperatorBase> CreateOp(const OpDesc& op_desc);
+
+  static VarNameMap ConvertOpDescVarsToVarNameMap(
+      const google::protobuf::RepeatedPtrField<OpDesc::Var>& op_desc_vars);
+
+  static std::shared_ptr<OperatorBase> CreateGradOp(const OperatorBase& op);
 
   static std::unordered_map<std::string, const OpInfo>& op_info_map() {
     static std::unordered_map<std::string, const OpInfo> op_info_map_;
```

```diff
@@ -272,8 +144,18 @@ class OpKernelRegistrar : public Registrar {
                     grad_op_class)                                        \
   STATIC_ASSERT_GLOBAL_NAMESPACE(                                         \
       __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
-  static ::paddle::framework::OpRegistrar<op_class, op_maker_class,       \
-                                          grad_op_class>                  \
+  class _OpClass_##op_type##_ : public op_class {                         \
+   public:                                                                \
+    DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_);                        \
+    DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class);               \
+  };                                                                      \
+  class _OpGradClass_##op_type##_ : public grad_op_class {                \
+   public:                                                                \
+    DEFINE_OP_CLONE_METHOD(_OpGradClass_##op_type##_);                    \
+    DEFINE_OP_CONSTRUCTOR(_OpGradClass_##op_type##_, grad_op_class);      \
+  };                                                                      \
+  static ::paddle::framework::OpRegistrar<                                \
+      _OpClass_##op_type##_, op_maker_class, _OpGradClass_##op_type##_>   \
       __op_registrar_##op_type##__(#op_type, #grad_op_type);              \
   int TouchOpRegistrar_##op_type() {                                      \
     __op_registrar_##op_type##__.Touch();                                 \
```

```diff
@@ -304,7 +186,8 @@ class OpKernelRegistrar : public Registrar {
   REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
 
 /**
- * Macro to mark what Operator and Kernel we will use and tell the compiler to
+ * Macro to mark what Operator and Kernel
+ * we will use and tell the compiler to
  * link them into target.
  */
 #define USE_OP_ITSELF(op_type) \
```

```diff
@@ -324,7 +207,8 @@ class OpKernelRegistrar : public Registrar {
       __attribute__((unused)) = \
           TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE()
 
-// TODO(fengjiayi): The following macros seems ugly, do we have better method?
+// TODO(fengjiayi): The following macros
+// seems ugly, do we have better method?
 
 #ifdef PADDLE_ONLY_CPU
 #define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU)
```
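The rewritten REGISTER_OP macro no longer registers `op_class` directly; it wraps the operator and its gradient operator in generated subclasses so that every registered kernel operator automatically gets a `Clone` method. For a hypothetical invocation `REGISTER_OP(my_op, MyOp, MyOpMaker, my_op_grad, MyOpGrad)` (all names made up for illustration), the macro body above expands to roughly:

```cpp
// Illustrative expansion derived from the macro text in the diff.
class _OpClass_my_op_ : public MyOp {
 public:
  DEFINE_OP_CLONE_METHOD(_OpClass_my_op_);        // adds Clone() returning a copy
  DEFINE_OP_CONSTRUCTOR(_OpClass_my_op_, MyOp);   // forwards the 4-arg constructor
};
class _OpGradClass_my_op_ : public MyOpGrad {
 public:
  DEFINE_OP_CLONE_METHOD(_OpGradClass_my_op_);
  DEFINE_OP_CONSTRUCTOR(_OpGradClass_my_op_, MyOpGrad);
};
static ::paddle::framework::OpRegistrar<
    _OpClass_my_op_, MyOpMaker, _OpGradClass_my_op_>
    __op_registrar_my_op__("my_op", "my_op_grad");
```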
paddle/framework/operator.cc (+38 / -0)

```diff
@@ -164,5 +164,43 @@ std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
   return ret_val;
 }
 
+void OpProtoAndCheckerMaker::Validate() {
+  validated_ = true;
+  CheckNoDuplicatedInOutAttrs();
+}
+
+OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput(
+    const std::string& name, const std::string& comment) {
+  auto* input = proto_->add_inputs();
+  input->set_name(name);
+  input->set_comment(comment);
+  return OpProtoAndCheckerMaker::VariableBuilder{input};
+}
+
+OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput(
+    const std::string& name, const std::string& comment) {
+  auto* output = proto_->add_outputs();
+  output->set_name(name);
+  output->set_comment(comment);
+  return OpProtoAndCheckerMaker::VariableBuilder{output};
+}
+
+void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() {
+  std::unordered_set<std::string> names;
+  auto checker = [&](const std::string& name) {
+    PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name);
+    names.insert(name);
+  };
+  for (auto& attr : proto_->attrs()) {
+    checker(attr.name());
+  }
+  for (auto& input : proto_->inputs()) {
+    checker(input.name());
+  }
+  for (auto& output : proto_->outputs()) {
+    checker(output.name());
+  }
+}
+
 }  // namespace framework
 }  // namespace paddle
```
paddle/framework/operator.h (+93 / -5)

```diff
@@ -67,10 +67,6 @@ class OperatorBase {
   OperatorBase(const std::string& type, const VarNameMap& inputs,
                const VarNameMap& outputs, const AttributeMap& attrs);
 
-  OperatorBase(const OperatorBase& o) = delete;
-  OperatorBase& operator=(const OperatorBase& o) = delete;
-  OperatorBase(OperatorBase&& o) = delete;
-
   virtual ~OperatorBase() {}
 
   template <typename T>
```

```diff
@@ -116,10 +112,14 @@ class OperatorBase {
   void SetType(const std::string& type) { type_ = type; }
   const AttributeMap& Attrs() const { return attrs_; }
 
+  // Return a new operator instance, which is as same as this.
+  // Use unique_ptr to prevent caller forget to delete this pointer.
+  virtual std::unique_ptr<OperatorBase> Clone() const = 0;
+
  protected:
   std::string type_;
   // NOTE: in case of OpGrad, inputs_ contains:
-  // I (Inputs)
+  // I (Inputs)opear
   // O (Outputs)
   // OG (Output Gradients)
   VarNameMap inputs_;
```

```diff
@@ -130,12 +130,100 @@ class OperatorBase {
   AttributeMap attrs_;
 };
 
+// Macro for define a clone method.
+// If you are writing an kernel operator, `Clone` will be defined when you
+// register it. i.e. `Clone` method is not needed to define by yourself.
+#define DEFINE_OP_CLONE_METHOD(CLS)                       \
+  std::unique_ptr<OperatorBase> Clone() const final {     \
+    return std::unique_ptr<OperatorBase>(new CLS(*this)); \
+  }
+
+// Macro for define a default constructor for Operator.
+// You can also use
+//   using PARENT_CLASS::PARENT_CLASS;
+// to use parent's constructor.
+#define DEFINE_OP_CONSTRUCTOR(CLS, PARENT_CLS)                                 \
+  CLS(const std::string& type, const VarNameMap& inputs,                      \
+      const VarNameMap& outputs, const paddle::framework::AttributeMap& attrs) \
+      : PARENT_CLS(type, inputs, outputs, attrs) {}
+
+class NOP : public OperatorBase {
+ public:
+  using OperatorBase::OperatorBase;
+  void InferShape(const Scope& scope) const override {}
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {}
+  std::unique_ptr<OperatorBase> Clone() const override {
+    return std::unique_ptr<OperatorBase>(new NOP(*this));
+  }
+};
+
+// this class not only make proto but also init attribute checkers.
+class OpProtoAndCheckerMaker {
+ public:
+  OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
+      : proto_(proto), op_checker_(op_checker) {}
+
+  ~OpProtoAndCheckerMaker() {
+    PADDLE_ENFORCE(validated_, "should call Validate after build");
+  }
+
+  void Validate();
+
+ protected:
+  struct VariableBuilder {
+    OpProto::Var* var_;
+
+    VariableBuilder& AsDuplicable() {
+      var_->set_duplicable(true);
+      return *this;
+    }
+
+    VariableBuilder& AsIntermediate() {
+      var_->set_intermediate(true);
+      return *this;
+    }
+
+    // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it
+    // means that input/output is not needed when calculate gradient. It does
+    // not mean no gradient when backward. It should be changed soon.
+    VariableBuilder& AsNoGradient() {
+      var_->set_no_gradient(true);
+      return *this;
+    }
+  };
+
+  VariableBuilder AddInput(const std::string& name,
+                           const std::string& comment);
+
+  VariableBuilder AddOutput(const std::string& name,
+                            const std::string& comment);
+
+  template <typename T>
+  TypedAttrChecker<T>& AddAttr(const std::string& name,
+                               const std::string& comment,
+                               bool generated = false) {
+    auto* attr = proto_->add_attrs();
+    attr->set_name(name);
+    attr->set_comment(comment);
+    attr->set_generated(generated);
+    attr->set_type(AttrTypeID<T>());
+    return op_checker_->AddAttrChecker<T>(name);
+  }
+
+  void AddComment(const std::string& comment) { proto_->set_comment(comment); }
+
+ private:
+  void CheckNoDuplicatedInOutAttrs();
+
+  OpProto* proto_;
+  OpAttrChecker* op_checker_;
+  bool validated_{false};
+};
+
+class NOPMaker : public OpProtoAndCheckerMaker {
+ public:
+  NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {}
+};
+
 class InferShapeContext {
```
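The `Clone` contract introduced here is the classic prototype pattern: copy a polymorphic object through its base pointer without knowing its concrete type. A minimal, self-contained sketch of what `DEFINE_OP_CLONE_METHOD` boils down to (toy types, not PaddlePaddle code):

```cpp
#include <iostream>
#include <memory>
#include <string>

struct Op {
  virtual ~Op() = default;
  virtual std::unique_ptr<Op> Clone() const = 0;  // polymorphic deep copy
  virtual std::string Type() const = 0;
};

struct AddOp : Op {
  std::unique_ptr<Op> Clone() const override {
    // Same shape as the macro body: copy-construct the concrete class.
    return std::unique_ptr<Op>(new AddOp(*this));
  }
  std::string Type() const override { return "add"; }
};

int main() {
  std::unique_ptr<Op> a(new AddOp);
  std::unique_ptr<Op> b = a->Clone();  // concrete type is preserved
  std::cout << b->Type() << "\n";      // prints "add"
}
```

Returning `std::unique_ptr` makes ownership of the copy explicit, which is exactly the rationale stated in the comment on `OperatorBase::Clone`.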
paddle/framework/operator_test.cc (+18 / -0)

```diff
@@ -245,3 +245,21 @@ TEST(OpKernel, multi_inputs) {
   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   op->Run(scope, cpu_device_context);
 }
+
+class OperatorClone : public paddle::framework::OperatorBase {
+ public:
+  DEFINE_OP_CLONE_METHOD(OperatorClone);
+  OperatorClone(const std::string& type, const VarNameMap& inputs,
+                const VarNameMap& outputs,
+                const paddle::framework::AttributeMap& attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+  void InferShape(const paddle::framework::Scope& scope) const override {}
+  void Run(const paddle::framework::Scope& scope,
+           const paddle::platform::DeviceContext& dev_ctx) const override {}
+};
+
+TEST(Operator, Clone) {
+  OperatorClone a("ABC", {}, {}, {});
+  auto b = a.Clone();
+  ASSERT_EQ(a.Type(), b->Type());
+}
\ No newline at end of file
```
paddle/memory/CMakeLists.txt (+1 / -1)

```diff
 add_subdirectory(detail)
 
 cc_library(memory SRCS memory.cc)
-cc_library(memcpy SRCS memcpy.cc DEPS device_context)
+cc_library(memcpy SRCS memcpy.cc)
 
 cc_library(paddle_memory
     DEPS
```
paddle/memory/memcpy.cc (+0 / -2)

```diff
@@ -16,8 +16,6 @@ limitations under the License. */
 #include <cstring>  // for memcpy
 
-#include "paddle/platform/device_context.h"
-
 namespace paddle {
 namespace memory {
```
paddle/operators/net_op.cc (+8 / -1)

```diff
@@ -85,7 +85,14 @@ NetOp::NetOp(const std::string& type,
              const framework::OperatorBase::VarNameMap& inputs,
              const framework::OperatorBase::VarNameMap& outputs,
              const framework::AttributeMap& attrs)
-    : OperatorBase(type, inputs, outputs, attrs) {}
+    : framework::OperatorBase(type, inputs, outputs, attrs) {}
+
+std::unique_ptr<framework::OperatorBase> NetOp::Clone() const {
+  PADDLE_ENFORCE(
+      add_op_done_,
+      "Must clone a sealed NetOp, invoke Net::CompleteAddOp before clone");
+  return std::unique_ptr<OperatorBase>(new NetOp(*this));
+}
 
 }  // namespace operators
 }  // namespace paddle
```
paddle/operators/net_op.h (+14 / -0)

```diff
@@ -41,6 +41,18 @@ class NetOp : public framework::OperatorBase {
   NetOp(const std::string& type, const VarNameMap& inputs,
         const VarNameMap& outputs, const framework::AttributeMap& attrs);
 
+  NetOp(const NetOp& o)
+      : framework::OperatorBase(
+            static_cast<const framework::OperatorBase&>(o)) {
+    this->ops_.reserve(o.ops_.size());
+    std::transform(o.ops_.begin(), o.ops_.end(),
+                   std::back_inserter(this->ops_),
+                   [](const std::shared_ptr<OperatorBase>& op)
+                       -> std::shared_ptr<OperatorBase> {
+                     return std::shared_ptr<OperatorBase>(op->Clone());
+                   });
+    this->CompleteAddOp();
+  }
+
   /**
    * Infer all the operators' input and output variables' shapes, will be called
    * before every mini-batch
```

```diff
@@ -98,6 +110,8 @@ class NetOp : public framework::OperatorBase {
   bool IsNetOp() const override;
   std::vector<std::string> OutputVars(bool has_intermediate) const override;
 
+  std::unique_ptr<framework::OperatorBase> Clone() const override;
+
   std::vector<std::shared_ptr<OperatorBase>> ops_;
 
  private:
```
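The NetOp copy constructor above deep-copies each child operator through its virtual `Clone()` rather than sharing pointers, so the clone owns an independent operator graph. A standalone sketch of that idiom, reusing the toy `Op` base from the earlier sketch (not the real framework class):

```cpp
#include <algorithm>
#include <iterator>
#include <memory>
#include <vector>

// Deep-copy a container of polymorphic objects: every element is cloned
// through its virtual Clone(), mirroring the std::transform/back_inserter
// pattern in the NetOp copy constructor.
std::vector<std::shared_ptr<Op>> DeepCopy(
    const std::vector<std::shared_ptr<Op>>& ops) {
  std::vector<std::shared_ptr<Op>> copies;
  copies.reserve(ops.size());
  std::transform(ops.begin(), ops.end(), std::back_inserter(copies),
                 [](const std::shared_ptr<Op>& op) {
                   return std::shared_ptr<Op>(op->Clone());
                 });
  return copies;
}
```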
paddle/operators/net_op_test.cc (+17 / -0)

```diff
@@ -13,6 +13,7 @@ static int run_cnt = 0;
 class TestOp : public framework::OperatorBase {
  public:
   using framework::OperatorBase::OperatorBase;
+  DEFINE_OP_CLONE_METHOD(TestOp);
   void InferShape(const Scope& scope) const override { ++infer_shape_cnt; }
   void Run(const Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
```

```diff
@@ -70,5 +71,21 @@ TEST(NetOp, insert_op) {
   ASSERT_EQ(3UL, net.ops_.size());
 }
 
+TEST(NetOp, Clone) {
+  NetOp net;
+  net.AddOp(std::shared_ptr<framework::NOP>(
+      new framework::NOP{"empty", {}, {}, {}}));
+  net.AddOp(std::shared_ptr<framework::NOP>(
+      new framework::NOP{"empty2", {}, {}, {}}));
+  net.CompleteAddOp(true);
+  auto new_net_op = net.Clone();
+  ASSERT_NE(new_net_op, nullptr);
+  ASSERT_TRUE(new_net_op->IsNetOp());
+  auto* new_net = static_cast<NetOp*>(new_net_op.get());
+  ASSERT_EQ(2, new_net->ops_.size());
+  ASSERT_EQ(new_net->ops_[0]->Type(), "empty");
+  ASSERT_EQ(new_net->ops_[1]->Type(), "empty2");
+}
+
 }  // namespace operators
 }  // namespace paddle
```
paddle/operators/recurrent_op.h (+18 / -4)

```diff
@@ -110,10 +110,17 @@ class RecurrentGradientAlgorithm {
   std::shared_ptr<NetOp>* stepnet_;
 };
 
-class RecurrentOp final : public framework::OperatorBase {
+class RecurrentOp : public framework::OperatorBase {
  public:
   RecurrentOp(const std::string& type, const VarNameMap& inputs,
               const VarNameMap& outputs, const framework::AttributeMap& attrs);
+
+  RecurrentOp(const RecurrentOp& o)
+      : framework::OperatorBase(
+            static_cast<const framework::OperatorBase&>(o)) {
+    // TODO(yuyang18): Implement copy ctor well.
+    PADDLE_THROW("Not implemented");
+  }
+
   /**
    * InferShape must be called before Run.
    */
```

```diff
@@ -137,12 +144,19 @@ class RecurrentOp final : public framework::OperatorBase {
   std::shared_ptr<NetOp> stepnet_;
 };
 
-class RecurrentGradientOp final : public framework::OperatorBase {
+class RecurrentGradientOp : public framework::OperatorBase {
  public:
   RecurrentGradientOp(const std::string& type, const VarNameMap& inputs,
                       const VarNameMap& outputs,
                       const framework::AttributeMap& attrs);
+
+  RecurrentGradientOp(const RecurrentGradientOp& o)
+      : framework::OperatorBase(
+            static_cast<const framework::OperatorBase&>(o)) {
+    // TODO(yuyang18): Implement Copy ctor.
+    PADDLE_THROW("Not Implemented");
+  }
+
   /**
    * InferShape must be called before Run.
    */
```
paddle/platform/CMakeLists.txt (+4 / -1)

```diff
@@ -16,5 +16,8 @@ ELSE()
     set(GPU_CTX_DEPS)
 ENDIF()
 
-cc_library(device_context SRCS device_context.cc DEPS place eigen3 ${GPU_CTX_DEPS})
+# memcpy deoends on device_context, here add deps individually for
+# avoiding cycle dependencies
+cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator
+    system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS})
 nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info)
```
paddle/platform/device_context.cc (+65 / -14)

```diff
@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/platform/device_context.h"
+#include "paddle/memory/memory.h"
 
 namespace paddle {
 namespace platform {
```

```diff
@@ -36,6 +37,59 @@ Place CPUDeviceContext::GetPlace() const { return CPUPlace(); }
 #ifndef PADDLE_ONLY_CPU
 
+class EigenCudaStreamDevice : public Eigen::StreamInterface {
+ public:
+  EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
+    Eigen::initializeDeviceProp();
+  }
+  ~EigenCudaStreamDevice() override {}
+
+  void Reinitialize(const cudaStream_t* cuda_stream, GPUPlace place) {
+    stream_ = cuda_stream;
+    place_ = place;
+    device_prop_ = &Eigen::m_deviceProperties[place.device];
+  }
+
+  const cudaStream_t& stream() const override { return *stream_; }
+
+  const cudaDeviceProp& deviceProperties() const override {
+    return *device_prop_;
+  }
+
+  void* allocate(size_t num_bytes) const override {
+    return paddle::memory::Alloc(place_, num_bytes);
+  }
+
+  void deallocate(void* buffer) const override {
+    paddle::memory::Free(place_, buffer);
+  }
+
+  void* scratchpad() const override {
+    if (scratch_ == NULL) {
+      scratch_ = allocate(Eigen::kCudaScratchSize + sizeof(unsigned int));
+    }
+    return scratch_;
+  }
+
+  unsigned int* semaphore() const override {
+    if (semaphore_ == NULL) {
+      char* scratch =
+          static_cast<char*>(scratchpad()) + Eigen::kCudaScratchSize;
+      semaphore_ = reinterpret_cast<unsigned int*>(scratch);
+      PADDLE_ENFORCE(
+          cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_));
+    }
+    return semaphore_;
+  }
+
+ private:
+  GPUPlace place_;
+  const cudaStream_t* stream_;         // not owned;
+  const cudaDeviceProp* device_prop_;  // not owned;
+  mutable void* scratch_;
+  mutable unsigned int* semaphore_;
+};
+
 template <>
 Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() const {
   return reinterpret_cast<const CUDADeviceContext*>(this)->eigen_device();
```

```diff
@@ -43,19 +97,9 @@ Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() const {
 CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) {
   SetDeviceId(place_.device);
-  // TODO(qijun) Pass a created cuda stream to Eigen::CudaStreamDevice directly
-  // here will cause segment fault. We must implement a class derived from
-  // Eigen::StreamInterface, and reinitialize it with a cuda stream and a gpu id
-  // later. Please refer to the implementation of class EigenCudaStreamDevice
-  // in TensorFlow.
-  //
-  // We find that CUDA 7 introduces a new option, the per-thread default stream,
-  // that has two effects. Please refer to https://devblogs.nvidia.com/
-  // parallelforall/gpu-pro-tip-cuda-7-streams-simplify-concurrency/
-  //
-  // So, we decide to use default stream and add –default-stream per-thread nvcc
-  // flag. Than, two threads with two CUDADeviceContexts will run parallelly.
-  eigen_stream_.reset(new Eigen::CudaStreamDevice());
+  PADDLE_ENFORCE(cudaStreamCreate(&stream_));
+  eigen_stream_.reset(new EigenCudaStreamDevice());
+  eigen_stream_->Reinitialize(&stream_, place);
   eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
 }
```

```diff
@@ -75,12 +119,13 @@ CUDADeviceContext::~CUDADeviceContext() {
   }
   eigen_stream_.reset();
   eigen_device_.reset();
+  PADDLE_ENFORCE(cudaStreamDestroy(stream_));
 }
 
 Place CUDADeviceContext::GetPlace() const { return place_; }
 
 void CUDADeviceContext::Wait() const {
-  PADDLE_ENFORCE(cudaStreamSynchronize(0));
+  PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
 }
 
 Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
```

```diff
@@ -91,6 +136,7 @@ cublasHandle_t CUDADeviceContext::cublas_handle() {
   if (!cublas_handle_) {
     SetDeviceId(place_.device);
     PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_));
+    PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_));
   }
   return cublas_handle_;
 }
```

```diff
@@ -99,10 +145,13 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() {
   if (!cudnn_handle_) {
     SetDeviceId(place_.device);
     PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
+    PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_));
   }
   return cudnn_handle_;
 }
 
+cudaStream_t CUDADeviceContext::stream() { return stream_; }
+
 curandGenerator_t CUDADeviceContext::curand_generator() {
   if (!curand_generator_) {
     SetDeviceId(place_.device);
```

```diff
@@ -110,6 +159,8 @@ curandGenerator_t CUDADeviceContext::curand_generator() {
         CURAND_RNG_PSEUDO_DEFAULT));
     PADDLE_ENFORCE(
         dynload::curandSetPseudoRandomGeneratorSeed(curand_generator_, seed_));
+
+    PADDLE_ENFORCE(dynload::curandSetStream(curand_generator_, stream_));
   }
   return curand_generator_;
 }
```
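The net effect of this file's changes: instead of relying on the nvcc per-thread default stream, each CUDADeviceContext now creates its own CUDA stream, wraps it for Eigen via EigenCudaStreamDevice, and binds each lazily created library handle (cuBLAS, cuDNN, cuRAND) to that stream. A hedged, self-contained sketch of the pattern with just cuBLAS (error handling reduced to asserts; `MiniContext` is a made-up name, not PaddlePaddle code):

```cpp
#include <cassert>
#include <cublas_v2.h>
#include <cuda_runtime.h>

struct MiniContext {
  cudaStream_t stream{nullptr};
  cublasHandle_t cublas{nullptr};

  MiniContext() {
    // One stream per context, so work from different contexts can overlap.
    assert(cudaStreamCreate(&stream) == cudaSuccess);
  }

  cublasHandle_t cublas_handle() {
    if (!cublas) {  // lazy creation, as in CUDADeviceContext
      assert(cublasCreate(&cublas) == CUBLAS_STATUS_SUCCESS);
      // Bind the handle to this context's stream instead of the default one.
      assert(cublasSetStream(cublas, stream) == CUBLAS_STATUS_SUCCESS);
    }
    return cublas;
  }

  ~MiniContext() {
    if (cublas) cublasDestroy(cublas);
    cudaStreamDestroy(stream);
  }
};
```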
paddle/platform/device_context.h (+9 / -4)

```diff
@@ -52,6 +52,7 @@ class CPUDeviceContext : public DeviceContext {
 };
 
 #ifndef PADDLE_ONLY_CPU
+class EigenCudaStreamDevice;
 
 class CUDADeviceContext : public DeviceContext {
  public:
```

```diff
@@ -76,6 +77,9 @@ class CUDADeviceContext : public DeviceContext {
   /*! \brief  Return curand handle in the device context. */
   curandGenerator_t curand_generator();
 
+  /*! \brief  Return cuda stream in the device context. */
+  cudaStream_t stream();
+
   // clang-format on
 
  private:
```

```diff
@@ -83,15 +87,16 @@ class CUDADeviceContext : public DeviceContext {
  private:
   std::unique_ptr<Eigen::GpuDevice> eigen_device_;
-  std::unique_ptr<Eigen::CudaStreamDevice> eigen_stream_;
+  std::unique_ptr<EigenCudaStreamDevice> eigen_stream_;
 
  private:
   uint64_t seed_;
 
   // clang-format off
-  cudnnHandle_t cudnn_handle_ = nullptr;
-  cublasHandle_t cublas_handle_ = nullptr;
-  curandGenerator_t curand_generator_ = nullptr;
+  cudaStream_t stream_{nullptr};
+  cudnnHandle_t cudnn_handle_{nullptr};
+  cublasHandle_t cublas_handle_{nullptr};
+  curandGenerator_t curand_generator_{nullptr};
   // clang-format on
 };
```
paddle/platform/device_context_test.cc (+1 / -0)

```diff
@@ -45,6 +45,7 @@ TEST(Device, CUDADeviceContext) {
     ASSERT_NE(nullptr, cublas_handle);
     curandGenerator_t curand_handle = device_context->curand_generator();
     ASSERT_NE(nullptr, curand_handle);
+    ASSERT_NE(nullptr, device_context->stream());
     delete device_context;
   }
 }
```
paddle/platform/enforce.h (+1 / -1)

```diff
@@ -86,7 +86,7 @@ struct EnforceNotMet : public std::exception {
                                 2 + sizeof(void*) * 2, call_stack[i],
                                 demangled, addr_offset);
       } else {
-        sout << string::Sprintf("%-3d %*0p %s\n", i, 2 + sizeof(void*) * 2,
+        sout << string::Sprintf("%-3d %*0p\n", i, 2 + sizeof(void*) * 2,
                                 call_stack[i]);
       }
     }
```
paddle/scripts/docker/build.sh (+3 / -23)

```diff
@@ -120,25 +120,6 @@ EOF
     /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT
 fi
 
-# generate deb package for current build
-# FIXME(typhoonzero): should we remove paddle/scripts/deb ?
-if [[ ${WITH_DEB:-ON} == "ON" ]]; then
-    cat <<EOF
-    ========================================
-    Generating .deb package ...
-    ========================================
-EOF
-    set +e
-    cpack -D CPACK_GENERATOR='DEB' -j `nproc` ..
-    err_code=$?
-    if [ ${err_code} -ne 0 ]; then
-        # cat error logs if cpack failed.
-        cat /paddle/build/_CPack_Packages/Linux/DEB/PreinstallOutput.log
-        exit ${err_code}
-    fi
-    set -e
-fi
-
 cat <<EOF
 ========================================
 Generate /paddle/build/Dockerfile ...
```

```diff
@@ -158,14 +139,13 @@ EOF
 fi
 
 cat >> /paddle/build/Dockerfile <<EOF
-    # Use different deb file when building different type of images
-    ADD *.deb /
+    ADD python/dist/*.whl /
     # run paddle version to install python packages first
     RUN apt-get update &&\
         apt-get install -y wget python-pip && pip install -U pip && \
-        dpkg -i /*.deb ; apt-get install -f -y && \
+        pip install /*.whl ; apt-get install -f -y && \
         apt-get clean -y && \
-        rm -f /*.deb && \
+        rm -f /*.whl && \
         paddle version
     ${DOCKERFILE_CUDNN_DSO}
     ${DOCKERFILE_GPU_ENV}
```
paddle/scripts/submit_local.sh.in (+5 / -24)

```diff
@@ -56,8 +56,7 @@ if [ -z "${PADDLE_NO_STAT+x}" ]; then
     fi
 fi
 
-MYDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
+PADDLE_BIN_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
 if [ ! -z "${DEBUGGER}" ]; then
     echo "Using debug command ${DEBUGGER}"
```

```diff
@@ -93,34 +92,16 @@ else:
     sys.exit(0)
 EOF
 
-if [ $? -eq 1 ]; then # Older version installed, or not installed at all
-    echo "First time run paddle, need to install some python dependencies."
-    # setuptools normalizes package version, so we need to use normalized
-    # package version for paddle python package
-    PYTHON_PADDLE_VERSION=$(python -c 'import packaging.version
-import setuptools
-print str(packaging.version.Version("@PADDLE_VERSION@"))
-' 2>/dev/null)
-    BASEDIR=$(dirname "$0")
-    pip install ${BASEDIR}/../opt/paddle/share/wheels/*-${PYTHON_PADDLE_VERSION}-*.whl
-    if [ $? -ne 0 ]; then
-        echo "pip install wheels failed. "
-        echo "Please use 'sudo paddle' at the first time you use PaddlePaddle"
-        echo "PaddlePaddle will install some python dependencies automatically."
-        exit 1
-    fi
-    echo "Python dependencies are installed."
-fi
 
 case "$1" in
     "train")
-        ${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_trainer ${@:2}
+        ${DEBUGGER} $PADDLE_BIN_PATH/paddle_trainer ${@:2}
         ;;
     "merge_model")
-        ${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_merge_model ${@:2}
+        ${DEBUGGER} $PADDLE_BIN_PATH/paddle_merge_model ${@:2}
         ;;
     "pserver")
-        ${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_pserver_main ${@:2}
+        ${DEBUGGER} $PADDLE_BIN_PATH/paddle_pserver_main ${@:2}
         ;;
     "dump_config")
         python -m paddle.utils.dump_config ${@:2}
```

```diff
@@ -129,7 +110,7 @@ case "$1" in
         python -m paddle.utils.make_model_diagram ${@:2}
         ;;
     "usage")
-        $MYDIR/../opt/paddle/bin/paddle_usage ${@:2}
+        $PADDLE_BIN_PATH/paddle_usage ${@:2}
         ;;
     "version")
         version
```
python/paddle/v2/trainer.py (+11 / -3)

```diff
@@ -27,16 +27,24 @@ class SGD(object):
     SGD Trainer combines data reader, network topolopy and update_equation together
     to train/test a neural network.
 
-    :param update_equation: The optimizer object.
-    :type update_equation: paddle.v2.optimizer.Optimizer
     :param cost: Target cost that neural network should be optimized.
     :type cost: paddle.v2.config_base.Layer
     :param parameters: The parameters dictionary.
     :type parameters: paddle.v2.parameters.Parameters
+    :param update_equation: The optimizer object.
+    :type update_equation: paddle.v2.optimizer.Optimizer
     :param extra_layers: Some layers in the neural network graph are not
                          in the path of cost layer.
-    :param pserver_spec: pserver location, eg: localhost:3000
     :type extra_layers: paddle.v2.config_base.Layer
+    :param is_local: Whether trainning locally
+    :type is_local: bool
+    :param pserver_spec: comma string for pserver location,
+                         eg:127.10.0.10:3000,127.10.0.11:3000,
+                         and this parameter is only used for fault
+                         tolerant mode cluster training.
+    :type pserver_spec: string
+    :param use_etcd: Whether using etcd pserver.
+    :param use_etcd: bool
     """
 
     def __init__(self,
```
python/setup.py.in (+9 / -7)

```diff
@@ -24,14 +24,17 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
     setup_requires+=["opencv-python"]
 
 # the prefix is sys.prefix which should always be usr
-paddle_bin_dir = 'local/opt/paddle/bin'
+paddle_bin_dir = 'opt/paddle/bin'
 paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage',
                '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
                '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
-               '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main']
+               '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main',
+               '${PADDLE_BINARY_DIR}/paddle/scripts/paddle']
 
-paddle_rt_lib_dir = 'local/lib'
-paddle_rt_libs = [] if '${MKL_SHARED_LIBS}'== '' else '${MKL_SHARED_LIBS}'.split(';')
+paddle_rt_lib_dir = 'lib'
+paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
+if '${MKL_SHARED_LIBS}'!= '':
+    paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';')
 
 setup(name='paddlepaddle',
       version='${PADDLE_VERSION}',
```

```diff
@@ -50,8 +53,7 @@ setup(name='paddlepaddle',
           'paddle.v2.framework.proto': '${PADDLE_BINARY_DIR}/paddle/framework',
           'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle'
       },
-      scripts=['${PADDLE_BINARY_DIR}/paddle/scripts/paddle'],
+      scripts=paddle_bins,
       distclass=BinaryDistribution,
-      data_files=[(paddle_bin_dir, paddle_bins),
-                  (paddle_rt_lib_dir, paddle_rt_libs)]
+      data_files=[(paddle_rt_lib_dir, paddle_rt_libs)]
 )
```