PaddlePaddle / Paddle · Commit 9b0fce51
Commit 9b0fce51
Authored Aug 04, 2017 by fengjiayi

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into dev_add_FillZerosLikeOp_test

Parents: 9d68c252, 548b72a6
Showing 34 changed files with 580 additions and 61 deletions (+580 −61).
Changed files:

CMakeLists.txt  +2 −2
cmake/generic.cmake  +15 −1
paddle/framework/ddim.h  +3 −6
paddle/framework/grad_op_builder.h  +14 −0
paddle/framework/op_registry.h  +3 −3
paddle/framework/operator.h  +1 −1
paddle/framework/pybind.cc  +10 −1
paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp  +19 −7
paddle/gserver/gradientmachines/RecurrentGradientMachine.h  +10 −2
paddle/gserver/tests/LayerGradUtil.cpp  +13 −1
paddle/gserver/tests/LayerGradUtil.h  +4 −1
paddle/memory/detail/buddy_allocator.h  +1 −1
paddle/memory/detail/meta_cache.h  +4 −4
paddle/memory/memory.h  +1 −1
paddle/operators/add_op.cu  +14 −0
paddle/operators/cross_entropy_op.cu  +15 −1
paddle/operators/fill_zeros_like_op.cu  +15 −1
paddle/operators/mean_op.cu  +15 −1
paddle/operators/mul_op.cu  +1 −1
paddle/operators/recurrent_op.h  +8 −12
paddle/operators/rowwise_add_op.cu  +14 −0
paddle/operators/sgd_op.cu  +15 −1
paddle/operators/sigmoid_op.cu  +14 −0
paddle/operators/softmax_op.cu  +16 −1
paddle/platform/device_context.h  +4 −4
paddle/platform/dynload/cublas.cc  +14 −0
paddle/platform/dynload/cudnn.cc  +15 −1
paddle/platform/dynload/curand.cc  +18 −3
paddle/platform/enforce.h  +45 −0
paddle/platform/enforce_test.cc  +162 −0
paddle/platform/place.h  +1 −1
paddle/string/piece.h  +2 −2
python/paddle/v2/framework/tests/CMakeLists.txt  +2 −1
python/paddle/v2/framework/tests/gradient_checker.py  +90 −0
CMakeLists.txt

@@ -36,8 +36,8 @@ include(simd)
 ################################ Configurations #######################################
 option(WITH_GPU      "Compile PaddlePaddle with NVIDIA GPU"          ${CUDA_FOUND})
 option(WITH_AVX      "Compile PaddlePaddle with AVX intrinsics"      ${AVX_FOUND})
-option(WITH_MKLDNN   "Compile PaddlePaddle with mkl-dnn support."    ${AVX_FOUND})
+option(WITH_MKLDNN   "Compile PaddlePaddle with mkl-dnn support."    OFF)
-option(WITH_MKLML    "Compile PaddlePaddle with mklml package."      ${AVX_FOUND})
+option(WITH_MKLML    "Compile PaddlePaddle with mklml package."      OFF)
 option(WITH_DSO      "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING  "Compile PaddlePaddle with unit testing"        ON)
 option(WITH_SWIG_PY  "Compile PaddlePaddle with inference api"       ON)
cmake/generic.cmake

@@ -187,7 +187,13 @@ function(cc_library TARGET_NAME)
       endif()
     endif()
     # cpplint code style
-    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS})
+    foreach(source_file ${cc_library_SRCS})
+      string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file})
+      if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+        list(APPEND cc_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+      endif()
+    endforeach()
+    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS} ${cc_library_HEADERS})
   else(cc_library_SRCS)
     if(cc_library_DEPS)

@@ -239,6 +245,14 @@ function(nv_library TARGET_NAME)
       add_dependencies(${TARGET_NAME} ${nv_library_DEPS})
       target_link_libraries(${TARGET_NAME} ${nv_library_DEPS})
     endif()
+    # cpplint code style
+    foreach(source_file ${nv_library_SRCS})
+      string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file})
+      if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+        list(APPEND cc_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+      endif()
+    endforeach()
+    add_style_check_target(${TARGET_NAME} ${nv_library_SRCS} ${nv_library_HEADERS})
   else(nv_library_SRCS)
     if(nv_library_DEPS)
       merge_static_libs(${TARGET_NAME} ${nv_library_DEPS})
paddle/framework/ddim.h

@@ -25,18 +25,15 @@ limitations under the License. */
 namespace paddle {
 namespace framework {

-namespace {
-typedef boost::variant<Dim<1>, Dim<2>, Dim<3>, Dim<4>, Dim<5>, Dim<6>, Dim<7>,
-                       Dim<8>, Dim<9>>
-    DDimVar;
-}
-
 /**
  * \brief A dynamically sized dimension.
  *
  * The number of dimensions must be between [1, 9].
  */
 struct DDim {
+  typedef boost::variant<Dim<1>, Dim<2>, Dim<3>, Dim<4>, Dim<5>, Dim<6>,
+                         Dim<7>, Dim<8>, Dim<9>>
+      DDimVar;
   DDimVar var;

   DDim() : var(Dim<1>()) {}
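For readers unfamiliar with the variant idiom behind DDim, here is a minimal, self-contained sketch of the same idea. It uses C++17 std::variant in place of boost::variant, and its Dim and rank() are simplified stand-ins, not Paddle's actual types:

```cpp
#include <array>
#include <cstdint>
#include <iostream>
#include <variant>

// Simplified stand-in for Paddle's Dim<N>: a fixed-rank shape.
template <int N>
struct Dim {
  std::array<int64_t, N> sizes{};
};

// A dynamically sized dimension holding one of Dim<1>..Dim<4>
// (Paddle's DDim goes up to Dim<9>).
struct DDim {
  using DDimVar = std::variant<Dim<1>, Dim<2>, Dim<3>, Dim<4>>;
  DDimVar var;

  DDim() : var(Dim<1>()) {}

  // Recover the rank by visiting whichever alternative is active.
  int rank() const {
    return std::visit(
        [](const auto& d) { return static_cast<int>(d.sizes.size()); }, var);
  }
};

int main() {
  DDim d;
  d.var = Dim<3>{{2, 3, 4}};
  std::cout << "rank = " << d.rank() << "\n";  // prints: rank = 3
}
```

Moving the typedef inside the struct, as this hunk does, also scopes DDimVar to DDim instead of leaving it in an unnamed namespace in a header, where every translation unit gets its own copy of the name.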
paddle/framework/grad_op_builder.h

+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #pragma once
 #include "paddle/framework/operator.h"
paddle/framework/op_registry.h

@@ -314,7 +314,7 @@ class OpRegistry {
   static std::unordered_map<std::string, OpProto>& protos() {
     static std::unordered_map<std::string, OpProto> protos_;
     return protos_;
-  };
+  }

   static std::unordered_map<std::string, std::string>& grad_ops() {
     static std::unordered_map<std::string, std::string> grad_ops_;

@@ -336,7 +336,7 @@ class OpRegistry {
   static std::unordered_map<std::string, OpAttrChecker>& op_checkers() {
     static std::unordered_map<std::string, OpAttrChecker> op_checkers_;
     return op_checkers_;
-  };
+  }

   static void GenerateTempVariableName(OperatorBase* op) {
     static std::atomic<size_t> gUniqId(0UL);

@@ -353,7 +353,7 @@ class OpRegistry {
 template <typename OpType, typename ProtoMakerType>
 class OpRegisterHelper {
  public:
-  OpRegisterHelper(const char* op_type) {
+  explicit OpRegisterHelper(const char* op_type) {
     OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type);
   }
 };
paddle/framework/operator.h

@@ -284,7 +284,7 @@ class OperatorWithKernel : public OperatorBase {
     platform::Place place_;

     OpKernelKey() = default;
-    OpKernelKey(const platform::DeviceContext& dev_ctx) {
+    explicit OpKernelKey(const platform::DeviceContext& dev_ctx) {
       place_ = dev_ctx.GetPlace();
     }
paddle/framework/pybind.cc

@@ -106,7 +106,16 @@ PYBIND11_PLUGIN(core) {
       .def("set", PyCUDATensorSetFromArray<float>)
       .def("set", PyCUDATensorSetFromArray<int>)
 #endif
-      .def("shape", [](Tensor& self) { return vectorize(self.dims()); });
+      .def("shape", [](Tensor& self) { return vectorize(self.dims()); })
+      .def("set_float_element",
+           [](Tensor& self, size_t offset, float f) {
+             // TODO(yuyang18): Only support GPU now.
+             self.data<float>()[offset] = f;
+           })
+      .def("get_float_element", [](Tensor& self, size_t offset) -> float {
+        // TODO(yuyang18): Only support GPU now.
+        return self.data<float>()[offset];
+      });

   py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp

@@ -967,8 +967,9 @@ void RecurrentGradientMachine::generateSequence() {
   size_t numSequences = getGenBatchSize();

   resizeBootFrame(numSequences);
-  // We create only two sub-network in generation for alternate use.
-  // Thus, we can reduce total memory of output_ in layer forward.
+  // We create only two sub-network in generation, one stores states of all
+  // layers in previous time step and the other storing the states at current
+  // time step.
   resizeOrCreateFrames(2);
   // outFrameLines_.size() > 1UL

@@ -1001,10 +1002,9 @@ void RecurrentGradientMachine::generateSequence() {
   // init outArg
   size_t resultNum = generator_.config.num_results_per_sample();
-  IVector::resizeOrCreate(
-      generator_.outArg.ids,
-      generator_.config.max_num_frames() * numSequences * resultNum,
-      false);
+  size_t maxGenWordCount =
+      generator_.config.max_num_frames() * numSequences * resultNum;
+  IVector::resizeOrCreate(generator_.outArg.ids, maxGenWordCount, false);
   if (resultNum > 1) {
     CHECK_LE(resultNum, static_cast<size_t>(generator_.config.beam_size()));
     Matrix::resizeOrCreate(generator_.outArg.in,

@@ -1012,6 +1012,11 @@ void RecurrentGradientMachine::generateSequence() {
                            /* width */ resultNum,
                            false,
                            /* useGpu */ false);
+    Matrix::resizeOrCreate(generator_.outArg.value,
+                           /* height */ maxGenWordCount,
+                           /* width */ 1,
+                           false,
+                           /* useGpu */ false);
   }
   ICpuGpuVector::resizeOrCreate(generator_.outArg.sequenceStartPositions,
                                 numSequences + 1,

@@ -1313,13 +1318,20 @@ void RecurrentGradientMachine::fillGenOutputs() {
   starts[0] = 0;
   if (numResults > 1) {
     real* probs = generator_.outArg.in->getData();
+    real* idsProb = generator_.outArg.value->getData();
+    size_t curPos = 0;
     for (size_t i = 0; i < finalPaths_.size(); ++i) {
       for (size_t j = 0; j < finalPaths_[i].size(); ++j) {
         Path& path = finalPaths_[i][j];
-        generator_.ids.push_back(path.ids.size());  // sequence size
+        size_t genLen = path.ids.size();
+        generator_.ids.push_back(genLen);  // sequence size
         generator_.ids.insert(
             generator_.ids.end(), path.ids.begin(), path.ids.end());
         generator_.ids.push_back(-1);  // end of sequence
+        memcpy(idsProb + curPos, path.idsProb.data(), sizeof(real) * genLen);
+        curPos += genLen;
+        idsProb[curPos++] = -1.0;
         probs[i * numResults + j] = path.logProb;
         if (!j && dataArgsSize_) {
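To make the packed output format concrete: after fillGenOutputs() runs, each generated sequence is stored in generator_.ids as its length, then its word ids, then a -1 terminator, while the idsProb buffer holds the per-word log probabilities followed by a -1.0 sentinel. The sketch below decodes that layout; it is illustrative only, with made-up data, and is not Paddle code:

```cpp
#include <cstdio>
#include <vector>

// Decode the packed beam-search output written by fillGenOutputs():
// per sequence, `ids` holds [length, id_0, ..., id_{n-1}, -1] and
// `idsProb` holds n log probabilities followed by a -1.0 sentinel.
int main() {
  std::vector<int> ids = {3, 7, 42, 9, -1, 2, 5, 11, -1};
  std::vector<float> idsProb = {-0.1f, -0.7f, -1.2f, -1.0f,
                                -0.3f, -0.9f, -1.0f};

  size_t idPos = 0, probPos = 0;
  while (idPos < ids.size()) {
    int len = ids[idPos++];  // first entry: sequence length
    std::printf("sequence of %d words:", len);
    for (int k = 0; k < len; ++k) {
      std::printf(" %d(logp=%.1f)", ids[idPos++], idsProb[probPos++]);
    }
    std::printf("\n");
    ++idPos;    // skip the -1 end-of-sequence marker
    ++probPos;  // skip the -1.0 sentinel in idsProb
  }
}
```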
paddle/gserver/gradientmachines/RecurrentGradientMachine.h

@@ -189,6 +189,11 @@ public:
    */
   std::vector<int> ids;

+  /**
+   * @brief idsProb, log probability of each generated words.
+   */
+  std::vector<real> idsProb;
+
   /**
    * @brief logProb, current probability of path.
    */

@@ -228,11 +233,13 @@ public:
    */
   Path(Path& old, int newId, real logProb, int machineId, int topIndex)
       : ids(old.ids),
+        idsProb(old.idsProb),
         logProb(old.logProb + logProb),
         machineId(machineId),
         topIndex(topIndex),
         seqId(old.seqId) {
     ids.push_back(newId);
+    idsProb.push_back(logProb);
     if (!old.probHistory.empty()) {
       this->probHistory = old.probHistory;
       // probHistory store current prob, not sum

@@ -412,6 +419,7 @@ protected:
   struct Generator {
     GeneratorConfig config;
     std::vector<int> ids;       // store generated sequences
+    std::vector<real> idsProb;  // log probability of each generated word
     Argument outArg;            // final output argument
   };
   bool generating_;
paddle/gserver/tests/LayerGradUtil.cpp

@@ -400,7 +400,6 @@ void initDataLayer(TestConfig testConf,
       const std::vector<int>& labelSeqStartPositions =
           testConf.inputDefs[i].labelSeqStartPositions;
       if (labelSeqStartPositions.size() != 0) {
-        CHECK(!sequenceStartPositions);
         CHECK_GE(static_cast<int>(labelSeqStartPositions.size()), 2);

         sequenceStartPositions =

@@ -410,6 +409,19 @@ void initDataLayer(TestConfig testConf,
             useGpu);
         data.sequenceStartPositions = sequenceStartPositions;
       }
+
+      const std::vector<int>& labelSubSeqStartPositions =
+          testConf.inputDefs[i].labelSubSeqStartPositions;
+      if (labelSubSeqStartPositions.size() != 0) {
+        CHECK_GE(static_cast<int>(labelSubSeqStartPositions.size()), 2);
+
+        subSequenceStartPositions =
+            ICpuGpuVector::create(labelSubSeqStartPositions.size(), useGpu);
+        subSequenceStartPositions->copyFrom(labelSubSeqStartPositions.data(),
+                                            labelSubSeqStartPositions.size(),
+                                            useGpu);
+        data.subSequenceStartPositions = subSequenceStartPositions;
+      }
       break;
     }
     default:
paddle/gserver/tests/LayerGradUtil.h

@@ -67,6 +67,7 @@ struct InputDef {
   bool isStatic;
   std::vector<int> labelInitValue;
   std::vector<int> labelSeqStartPositions;
+  std::vector<int> labelSubSeqStartPositions;
   MatrixPtr selfDefinedData;

   InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) {

@@ -81,8 +82,10 @@ struct InputDef {
   InputDef(InputType type,
            string nameIn,
            MatrixPtr selfDefinedData,
-           std::vector<int> selfDefinedSeqStartPos = {})
+           std::vector<int> selfDefinedSeqStartPos = {},
+           std::vector<int> selfDefinedSubSeqStartPos = {})
       : labelSeqStartPositions(selfDefinedSeqStartPos),
+        labelSubSeqStartPositions(selfDefinedSubSeqStartPos),
         selfDefinedData(selfDefinedData) {
     inputType = type;
     name = nameIn;
paddle/memory/detail/buddy_allocator.h

@@ -39,7 +39,7 @@ class BuddyAllocator {
  public:
   void* Alloc(size_t unaligned_size);
-  void Free(void*);
+  void Free(void* ptr);
   size_t Used();

  public:
paddle/memory/detail/meta_cache.h

@@ -33,17 +33,17 @@ namespace detail {
  */
 class MetadataCache {
  public:
-  MetadataCache(bool uses_gpu);
+  explicit MetadataCache(bool uses_gpu);

  public:
   /*! \brief Load the associated metadata for the specified memory block. */
-  Metadata load(const MemoryBlock*);
+  Metadata load(const MemoryBlock* memory_block);

   /*! \brief Store the associated metadata for the specified memory block. */
-  void store(MemoryBlock*, const Metadata&);
+  void store(MemoryBlock* memory_block, const Metadata& meta_data);

   /*! \brief Indicate that the specified metadata will no longer be used. */
-  void invalidate(MemoryBlock*);
+  void invalidate(MemoryBlock* memory_block);

  public:
   MetadataCache(const MetadataCache&) = delete;
paddle/memory/memory.h

@@ -68,7 +68,7 @@ class PODDeleter {
   static_assert(std::is_pod<T>::value, "T must be POD");

  public:
-  PODDeleter(Place place) : place_(place) {}
+  explicit PODDeleter(Place place) : place_(place) {}
   void operator()(T* ptr) { Free(place_, static_cast<void*>(ptr)); }

  private:
paddle/operators/add_op.cu

Adds the same Apache 2.0 license header as above; the body is unchanged:

 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/add_op.h"
paddle/operators/cross_entropy_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/operators/cross_entropy_op.h"
paddle/operators/fill_zeros_like_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/fill_zeros_like_op.h"
paddle/operators/mean_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/operators/mean_op.h"

paddle/operators/mul_op.cu  (+1 −1)
paddle/operators/recurrent_op.h

@@ -19,7 +19,7 @@
 namespace paddle {
 namespace operators {

-using namespace paddle::framework;
+using namespace paddle::framework;  // NOLINT

 namespace rnn {

@@ -94,7 +94,7 @@ void InitArgument(const ArgumentName& name, Argument* arg);
 };  // namespace rnn

 // The sequence format in RecurrentOp is Tensor<seq_len, batch_size, dim> now.
-// TODO:
+// TODO(Yan Chunwei):
 // 1. No-padding computing for sequences with indifinite length in one batch.
 // 2. Hierarchical RNN for sequence with sub-sequence.
 // 3. Internal Memory.

@@ -172,11 +172,9 @@ public:
   /**
    * InferShape must be called before Run.
    */
-  virtual void InferShape(const Scope& scope) const override {
-    alg_.InferShape(scope);
-  }
+  void InferShape(const Scope& scope) const override { alg_.InferShape(scope); }

-  virtual void Run(const Scope& scope,
-                   const platform::DeviceContext& dev_ctx) const override {
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
   }

@@ -194,11 +192,9 @@ public:
   /**
    * InferShape must be called before Run.
    */
-  virtual void InferShape(const Scope& scope) const override {
-    alg_.InferShape(scope);
-  }
+  void InferShape(const Scope& scope) const override { alg_.InferShape(scope); }

-  virtual void Run(const Scope& scope,
-                   const platform::DeviceContext& dev_ctx) const override {
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
   }
paddle/operators/rowwise_add_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/operators/rowwise_add_op.h"

paddle/operators/sgd_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/operators/sgd_op.h"

paddle/operators/sigmoid_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/operators/sigmoid_op.h"
paddle/operators/softmax_op.cu

Adds the Apache 2.0 license header, followed by:

 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/softmax_op.h"

 REGISTER_OP_GPU_KERNEL(softmax, ops::SoftmaxKernel<ops::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(softmax_grad,
                        ops::SoftmaxGradKernel<ops::GPUPlace, float>);
paddle/platform/device_context.h

@@ -40,7 +40,7 @@ class DeviceContext {
 class CPUDeviceContext : public DeviceContext {
  public:
   CPUDeviceContext();
-  CPUDeviceContext(CPUPlace);
+  explicit CPUDeviceContext(CPUPlace);
   virtual ~CPUDeviceContext() {}
   Eigen::DefaultDevice* eigen_device() const;

@@ -55,7 +55,7 @@ class CPUDeviceContext : public DeviceContext {
 class CUDADeviceContext : public DeviceContext {
  public:
-  explicit CUDADeviceContext(GPUPlace);
+  CUDADeviceContext(GPUPlace);  // NOLINT
   virtual ~CUDADeviceContext();

   /*! \brief Wait for all operations completion in the stream. */

@@ -69,10 +69,10 @@ class CUDADeviceContext : public DeviceContext {
   // clang-format off
   /*! \brief Return cublas handle in the device context. */
   cublasHandle_t    cublas_handle();
   /*! \brief Return cudnn handle in the device context. */
   cudnnHandle_t     cudnn_handle();
   /*! \brief Return curand handle in the device context. */
   curandGenerator_t curand_generator();
paddle/platform/dynload/cublas.cc

Adds the Apache 2.0 license header, followed by:

 #include <paddle/platform/dynload/cublas.h>

 namespace paddle {

paddle/platform/dynload/cudnn.cc

Adds the Apache 2.0 license header, followed by:

 #include <paddle/platform/dynload/cudnn.h>

 namespace paddle {
paddle/platform/dynload/curand.cc

Adds the Apache 2.0 license header before the include, and fixes the file ending:

 #include <paddle/platform/dynload/curand.h>

 namespace paddle {

@@ -10,6 +24,7 @@ void *curand_dso_handle;
 #define DEFINE_WRAP(__name) DynLoad__##__name __name

 CURAND_RAND_ROUTINE_EACH(DEFINE_WRAP);

-}
-}
\ No newline at end of file
+}  // namespace dynload
+}  // namespace platform
+}  // namespace paddle
paddle/platform/enforce.h

@@ -162,5 +162,50 @@ inline void throw_on_error(T e) {
   } \
   } while (0)

+/*
+ * Some enforce helpers here, usage:
+ *    int a = 1;
+ *    int b = 2;
+ *    PADDLE_ENFORCE_EQ(a, b);
+ *
+ * will raise an expression described as follows:
+ *    "enforce a == b failed, 1 != 2" with detailed stack infomation.
+ *
+ * extra messages is also supported, for example:
+ *    PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2)
+ */
+
+#define PADDLE_ENFORCE_EQ(__VAL0, __VAL1, ...) \
+  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, ==, !=, __VA_ARGS__)
+#define PADDLE_ENFORCE_NE(__VAL0, __VAL1, ...) \
+  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, !=, ==, __VA_ARGS__)
+#define PADDLE_ENFORCE_GT(__VAL0, __VAL1, ...) \
+  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >, <=, __VA_ARGS__)
+#define PADDLE_ENFORCE_GE(__VAL0, __VAL1, ...) \
+  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >=, <, __VA_ARGS__)
+#define PADDLE_ENFORCE_LT(__VAL0, __VAL1, ...) \
+  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
+#define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
+  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
+
+// if two values have different data types, choose a compatible type for them.
+template <typename T1, typename T2>
+struct CompatibleType {
+  static const bool t1_to_t2 = std::is_convertible<T1, T2>::value;
+  typedef typename std::conditional<t1_to_t2, T2, T1>::type type;
+};
+
+#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...)   \
+  PADDLE_ENFORCE(__COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL0)               \
+                     __CMP __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL1),    \
+                 "enforce %s " #__CMP " %s failed, %s " #__INV_CMP       \
+                 " %s\n%s",                                              \
+                 #__VAL0, #__VAL1, std::to_string(__VAL0),               \
+                 std::to_string(__VAL1),                                 \
+                 paddle::string::Sprintf("" __VA_ARGS__));
+
+#define __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL)              \
+  typename paddle::platform::CompatibleType<decltype(__VAL0), \
+                                            decltype(__VAL1)>::type(__VAL)
+
 }  // namespace platform
 }  // namespace paddle
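To see what the CompatibleType promotion actually does, here is a small, self-contained sketch (an illustration only, not Paddle code) that re-implements the trait and applies it the way __PADDLE_BINARY_COMPARE does:

```cpp
#include <iostream>
#include <type_traits>

// Re-implementation of the trait above, for illustration only:
// if T1 converts to T2, compare as T2; otherwise compare as T1.
template <typename T1, typename T2>
struct CompatibleType {
  static const bool t1_to_t2 = std::is_convertible<T1, T2>::value;
  typedef typename std::conditional<t1_to_t2, T2, T1>::type type;
};

template <typename T1, typename T2>
bool compatible_eq(T1 a, T2 b) {
  using C = typename CompatibleType<T1, T2>::type;
  return C(a) == C(b);  // both operands cast to the chosen common type
}

int main() {
  // double -> unsigned long is convertible, so both compare as unsigned long.
  std::cout << std::boolalpha
            << compatible_eq(1.0, 1UL) << "\n"   // true
            << compatible_eq(1.5, 1UL) << "\n";  // also true: 1.5 truncates to 1
}
```

This is why the ENFORCE_NE test below reports "enforce 1.0 != 1UL failed, 1.000000 == 1": the message stringifies the original operands, but the comparison happens after both sides are coerced to the common type.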
paddle/platform/enforce_test.cc

@@ -34,3 +34,165 @@ TEST(ENFORCE, FAILED) {
   }
   ASSERT_TRUE(in_catch);
 }

All of the following tests are new:

TEST(ENFORCE, NO_ARG_OK) {
  int a = 2;
  int b = 2;
  PADDLE_ENFORCE_EQ(a, b);
  // test enforce with extra message.
  PADDLE_ENFORCE_EQ(a, b, "some thing wrong %s", "info");
}

TEST(ENFORCE_EQ, NO_EXTRA_MSG_FAIL) {
  int a = 2;
  bool in_catch = false;

  try {
    PADDLE_ENFORCE_EQ(a, 1 + 3);
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg = "enforce a == 1 + 3 failed, 2 != 4";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}

TEST(ENFORCE_EQ, EXTRA_MSG_FAIL) {
  int a = 2;
  bool in_catch = false;

  try {
    PADDLE_ENFORCE_EQ(a, 1 + 3, "%s size not match", "their");
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg =
        "enforce a == 1 + 3 failed, 2 != 4\ntheir size not match";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}

TEST(ENFORCE_NE, OK) {
  PADDLE_ENFORCE_NE(1, 2);
  PADDLE_ENFORCE_NE(1.0, 2UL);
}

TEST(ENFORCE_NE, FAIL) {
  bool in_catch = false;

  try {
    // 2UL here to check data type compatible
    PADDLE_ENFORCE_NE(1.0, 1UL);
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg = "enforce 1.0 != 1UL failed, 1.000000 == 1";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}

TEST(ENFORCE_GT, OK) { PADDLE_ENFORCE_GT(2, 1); }

TEST(ENFORCE_GT, FAIL) {
  bool in_catch = false;

  try {
    // 2UL here to check data type compatible
    PADDLE_ENFORCE_GT(1, 2UL);
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg = "enforce 1 > 2UL failed, 1 <= 2";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}

TEST(ENFORCE_GE, OK) {
  PADDLE_ENFORCE_GE(2, 2UL);
  PADDLE_ENFORCE_GE(3, 2UL);
  PADDLE_ENFORCE_GE(3, 2);
  PADDLE_ENFORCE_GE(3.21, 2UL);
}

TEST(ENFORCE_GE, FAIL) {
  bool in_catch = false;

  try {
    PADDLE_ENFORCE_GE(1, 2UL);
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg = "enforce 1 >= 2UL failed, 1 < 2";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}

TEST(ENFORCE_LE, OK) {
  PADDLE_ENFORCE_LE(1, 1);
  PADDLE_ENFORCE_LE(1, 1UL);
  PADDLE_ENFORCE_LE(2, 3UL);
  PADDLE_ENFORCE_LE(2UL, 3);
  PADDLE_ENFORCE_LE(2UL, 3.2);
}

TEST(ENFORCE_LE, FAIL) {
  bool in_catch = false;

  try {
    PADDLE_ENFORCE_GT(1, 2UL);
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg = "enforce 1 > 2UL failed, 1 <= 2";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}

TEST(ENFORCE_LT, OK) {
  PADDLE_ENFORCE_LT(3, 10);
  PADDLE_ENFORCE_LT(2, 3UL);
  PADDLE_ENFORCE_LT(2UL, 3);
}

TEST(ENFORCE_LT, FAIL) {
  bool in_catch = false;

  try {
    PADDLE_ENFORCE_LT(1UL, 0.12);
  } catch (paddle::platform::EnforceNotMet error) {
    in_catch = true;
    const std::string msg = "enforce 1UL < 0.12 failed, 1 >= 0.12";
    const char* what = error.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }

  ASSERT_TRUE(in_catch);
}
paddle/platform/place.h

@@ -32,7 +32,7 @@ struct CPUPlace {
 struct GPUPlace {
   GPUPlace() : GPUPlace(0) {}
-  GPUPlace(int d) : device(d) {}
+  GPUPlace(int d) : device(d) {}  // NOLINT

   // needed for variant equality comparison
   inline bool operator==(const GPUPlace& o) const { return device == o.device; }
...
paddle/string/piece.h
浏览文件 @
9b0fce51
...
@@ -39,8 +39,8 @@ public:
...
@@ -39,8 +39,8 @@ public:
// size_ is 0.
// size_ is 0.
Piece
();
Piece
();
Piece
(
const
char
*
d
,
size_t
n
);
Piece
(
const
char
*
d
,
size_t
n
);
Piece
(
const
char
*
d
);
Piece
(
const
char
*
d
);
// NOLINT
Piece
(
const
std
::
string
&
s
);
Piece
(
const
std
::
string
&
s
);
// NOLINT
const
char
*
data
()
const
{
return
data_
;
}
const
char
*
data
()
const
{
return
data_
;
}
size_t
len
()
const
{
return
size_
;
}
size_t
len
()
const
{
return
size_
;
}
...
...
python/paddle/v2/framework/tests/CMakeLists.txt

@@ -14,4 +14,5 @@ add_python_test(test_framework
     test_softmax_op.py
     test_rowwise_add_op.py
     test_fill_zeros_like_op.py
-    test_network.py)
+    test_network.py
+    gradient_checker.py)
python/paddle/v2/framework/tests/gradient_checker.py  (new file, mode 100644)

import paddle.v2.framework.core as core
from paddle.v2.framework.create_op_creation_methods import op_creations
import numpy
import unittest

__all__ = ['get_numeric_gradient']


def get_numeric_gradient(op,
                         input_values,
                         output_name,
                         input_to_check,
                         delta=1e-2,
                         local_scope=None):
    """
    Get Numeric Gradient for an operator's input.

    :param op: C++ operator instance, could be an network
    :param input_values: The input variables. Should be an dictionary, key is
    variable name. Value is numpy array.
    :param output_name: The final output variable name.
    :param input_to_check: The input variable need to get gradient.
    :param delta: The perturbation value for numeric gradient method. The
    smaller delta is, the more accurate result will get. But if that delta is
    too small, it could occur numerical stability problem.
    :param local_scope: The local scope used for get_numeric_gradient.
    :return: The gradient array in numpy format.
    """
    if local_scope is None:
        local_scope = core.Scope()

    # Create all input variable in local_scope
    for var_name in input_values:
        var = local_scope.new_var(var_name)
        tensor = var.get_tensor()
        tensor.set_dims(input_values[var_name].shape)
        tensor.alloc_float(core.CPUPlace())
        tensor.set(input_values[var_name], core.CPUPlace())

    # Create all output variable in local_scope
    for output in op.outputs():
        if local_scope.find_var(output) is None:
            local_scope.new_var(output).get_tensor()

    op.infer_shape(local_scope)

    # allocate output memory
    for output in op.outputs():
        local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace())

    # TODO(yuyang18): Only CPU is support now.
    cpu_ctx = core.DeviceContext.create(core.CPUPlace())

    def get_output():
        op.run(local_scope, cpu_ctx)
        return numpy.array(local_scope.find_var(output_name).get_tensor()).sum()

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    tensor_to_check = local_scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
    gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32')
    for i in xrange(tensor_size):
        origin = tensor_to_check.get_float_element(i)
        x_pos = origin + delta
        tensor_to_check.set_float_element(i, x_pos)
        y_pos = get_output()

        x_neg = origin - delta
        tensor_to_check.set_float_element(i, x_neg)
        y_neg = get_output()

        tensor_to_check.set_float_element(i, origin)  # restore old value
        gradient_flat[i] = (y_pos - y_neg) / delta / 2
    return gradient_flat.reshape(tensor_to_check.get_dims())


if __name__ == '__main__':

    class GetNumericGradientTest(unittest.TestCase):
        def test_add_op(self):
            add_op = op_creations.add_two(X="X", Y="Y", Out="Z")
            x = numpy.random.random((10, 1)).astype("float32")
            y = numpy.random.random((10, 1)).astype("float32")

            arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X')
            self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-2)

    unittest.main()
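The perturbation loop in get_numeric_gradient is a standard central (symmetric) finite-difference estimate: for each element $x_i$ of the tensor under check,

$$\frac{\partial f}{\partial x_i} \approx \frac{f(x + \delta e_i) - f(x - \delta e_i)}{2\delta},$$

where $f$ is the sum of the named output and $e_i$ is the $i$-th unit vector; this is exactly (y_pos - y_neg) / delta / 2 in the code. The symmetric form has $O(\delta^2)$ truncation error, versus $O(\delta)$ for a one-sided difference, which is why both y_pos and y_neg are evaluated. The self-test checks add_two: since $Z = X + Y$, the gradient of $\operatorname{sum}(Z)$ with respect to $X$ is all ones, so the mean of the numeric gradient should be close to 1.0.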