Commit 1aeb6c5c
Authored May 16, 2018 by qiaolongfei

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add-mkldnn-to-paddle-lib

Parents: 269a62a2, e5288627
Showing 15 changed files with 55 additions and 43 deletions (+55, -43)
cmake/external/snappy.cmake  (+0, -2)
cmake/external/snappystream.cmake  (+0, -2)
paddle/CMakeLists.txt  (+1, -1)
paddle/fluid/framework/data_type.cc  (+1, -0)
paddle/fluid/framework/data_type.h  (+7, -1)
paddle/fluid/framework/details/fetch_op_handle.cc  (+8, -7)
paddle/fluid/framework/framework.proto  (+1, -0)
paddle/fluid/framework/lod_tensor_test.cc  (+13, -4)
paddle/fluid/framework/operator.h  (+0, -1)
paddle/fluid/operators/math/math_function.cc  (+3, -1)
paddle/fluid/platform/CMakeLists.txt  (+1, -1)
paddle/scripts/paddle_build.sh  (+1, -0)
python/paddle/fluid/tests/unittests/CMakeLists.txt  (+2, -0)
python/paddle/fluid/tests/unittests/test_network_with_dtype.py  (+15, -20)
python/paddle/fluid/tests/unittests/test_parallel_executor.py  (+2, -3)
cmake/external/snappy.cmake  (+0, -2)

@@ -47,8 +47,6 @@ ExternalProject_Add(
         -DCMAKE_INSTALL_LIBDIR:PATH=${SNAPPY_INSTALL_DIR}/lib
         -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
         -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-        BUILD_COMMAND make -j8
-        INSTALL_COMMAND make install
 )
 add_library(snappy STATIC IMPORTED GLOBAL)
cmake/external/snappystream.cmake  (+0, -2)

@@ -46,8 +46,6 @@ ExternalProject_Add(
         -DCMAKE_INSTALL_PREFIX:PATH=${SNAPPYSTREAM_INSTALL_DIR}
         -DCMAKE_INSTALL_LIBDIR:PATH=${SNAPPYSTREAM_INSTALL_DIR}/lib
         -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-        BUILD_COMMAND make -j8
-        INSTALL_COMMAND make install
         DEPENDS snappy
 )
paddle/CMakeLists.txt  (+1, -1)

@@ -24,6 +24,6 @@ if(NOT WITH_FLUID_ONLY)
 endif()
 add_subdirectory(testing)
-if(NOT MOBILE_INFERENCE AND NOT RPI)
+if(NOT MOBILE_INFERENCE AND NOT RPI AND NOT WITH_C_API)
   add_subdirectory(fluid)
 endif()
paddle/fluid/framework/data_type.cc  (+1, -0)

@@ -58,6 +58,7 @@ static DataTypeMap* InitDataTypeMap() {
   RegType(bool, proto::VarType::BOOL);
   RegType(size_t, proto::VarType::SIZE_T);
   RegType(int16_t, proto::VarType::INT16);
+  RegType(uint8_t, proto::VarType::UINT8);
 #undef RegType
   return retv;
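For orientation, a hedged sketch (not part of the commit) of what the new registration enables, assuming the ToDataType(std::type_index) helper declared alongside this map in data_type.h: once uint8_t is registered, mapping the C++ type to its protobuf enum value succeeds instead of falling through to the unsupported-type error path.

#include <cstdint>
#include <typeindex>
#include <typeinfo>

// Illustration only; relies on the (assumed) ToDataType helper backed by the
// map initialized in InitDataTypeMap() above.
//
//   auto dtype = paddle::framework::ToDataType(typeid(uint8_t));
//   // with the RegType(uint8_t, ...) line added above, dtype is
//   // paddle::framework::proto::VarType::UINT8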
paddle/fluid/framework/data_type.h  (+7, -1)

@@ -47,8 +47,14 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
     case proto::VarType::BOOL:
       visitor.template operator()<bool>();
       break;
+    case proto::VarType::UINT8:
+      visitor.template operator()<uint8_t>();
+      break;
+    case proto::VarType::INT16:
+      visitor.template operator()<int16_t>();
+      break;
     default:
-      PADDLE_THROW("Not supported");
+      PADDLE_THROW("Not supported %d", type);
   }
 }
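As a brief illustration (not part of the diff): VisitDataType dispatches a visitor's templated operator() on the runtime VarType tag, so with the cases added above a visitor written once is now also invoked for uint8_t and int16_t tensors. A minimal sketch, assuming only the dispatch pattern shown in the hunk; the visitor type below is hypothetical.

#include <cstdint>
#include <iostream>

// Hypothetical visitor for illustration: reports the element width of the
// type chosen by VisitDataType. Any functor with a templated operator()()
// fits the call pattern `visitor.template operator()<T>()` used above.
struct PrintElementSize {
  template <typename T>
  void operator()() const {
    std::cout << "element size: " << sizeof(T) << " bytes\n";
  }
};

// Usage sketch (namespaces abbreviated):
//   VisitDataType(proto::VarType::UINT8, PrintElementSize());  // prints 1
//   VisitDataType(proto::VarType::INT16, PrintElementSize());  // prints 2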
paddle/fluid/framework/details/fetch_op_handle.cc  (+8, -7)

@@ -48,17 +48,18 @@ void FetchOpHandle::RunImpl() {
   WaitInputVarGenerated(platform::CPUPlace());

   tensors_.resize(inputs_.size());
-  auto *var_handle = static_cast<VarHandle *>(inputs_[0]);
-  auto &var_name = var_handle->name_;
   platform::CPUPlace cpu;
   auto &scopes = *local_scopes_;

-  for (size_t i = 0; i < scopes.size(); ++i) {
-    auto &scope = scopes[i];
-    auto *var = scope->FindVar(kLocalExecScopeName)
-                    ->Get<Scope *>()
-                    ->FindVar(var_name);
+  for (size_t i = 0; i < inputs_.size(); ++i) {
+    auto *var_handle = static_cast<VarHandle *>(inputs_[i]);
+    auto &scope = scopes.at(var_handle->scope_idx_);
+    auto *var = scope->FindVar(kLocalExecScopeName)
+                    ->Get<Scope *>()
+                    ->FindVar(var_handle->name_);
     PADDLE_ENFORCE_NOT_NULL(var, "Cannot find variable %s in execution scope",
-                            var_name);
+                            var_handle->name_);

     auto &t = var->Get<framework::LoDTensor>();
     if (platform::is_gpu_place(t.place())) {
 #ifdef PADDLE_WITH_CUDA
paddle/fluid/framework/framework.proto  (+1, -0)

@@ -103,6 +103,7 @@ message VarType {
     FP64 = 6;
     // Tensor<size_t> is used in C++.
     SIZE_T = 19;
+    UINT8 = 20;

     // Other types that may need additional descriptions
     LOD_TENSOR = 7;
paddle/fluid/framework/lod_tensor_test.cc  (+13, -4)

@@ -228,11 +228,12 @@ TEST(LoD, CheckAbsLoD) {
   ASSERT_FALSE(CheckAbsLoD(abs_lod0));
 }

-TEST(LoDTensor, RecordIO) {
+template <typename T>
+static void TestRecordIO() {
   LoDTensor tensor;
-  int* tmp = tensor.mutable_data<int>(make_ddim({4, 5}), platform::CPUPlace());
+  T* tmp = tensor.mutable_data<T>(make_ddim({4, 5}), platform::CPUPlace());
   for (int i = 0; i < 20; ++i) {
-    tmp[i] = i;
+    tmp[i] = static_cast<T>(i);
   }
   std::stringstream* stream = new std::stringstream();

@@ -247,7 +248,7 @@ TEST(LoDTensor, RecordIO) {
   auto assert_tensor_ok = [](const LoDTensor& tensor) {
     for (int i = 0; i < 20; ++i) {
-      ASSERT_EQ(tensor.data<int>()[i], i);
+      ASSERT_EQ(tensor.data<T>()[i], static_cast<T>(i));
     }
   };

@@ -265,5 +266,13 @@ TEST(LoDTensor, RecordIO) {
   }
 }

+TEST(LoDTensor, RecordIO) {
+  TestRecordIO<int>();
+  TestRecordIO<int16_t>();
+  TestRecordIO<uint8_t>();
+  TestRecordIO<float>();
+  TestRecordIO<double>();
+}
+
 }  // namespace framework
 }  // namespace paddle
paddle/fluid/framework/operator.h  (+0, -1)

@@ -33,7 +33,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/variant.h"
-#include "paddle/utils/Error.h"

 namespace paddle {
 namespace framework {
paddle/fluid/operators/math/math_function.cc  (+3, -1)

@@ -38,7 +38,9 @@ template struct SetConstant<platform::CPUDeviceContext, bool>;
   template struct Transpose<platform::CPUDeviceContext, double, RANK>;  \
   template struct Transpose<platform::CPUDeviceContext, int, RANK>;     \
   template struct Transpose<platform::CPUDeviceContext, int64_t, RANK>; \
-  template struct Transpose<platform::CPUDeviceContext, bool, RANK>;
+  template struct Transpose<platform::CPUDeviceContext, bool, RANK>;    \
+  template struct Transpose<platform::CPUDeviceContext, int16_t, RANK>; \
+  template struct Transpose<platform::CPUDeviceContext, uint8_t, RANK>;

 DEFINE_CPU_TRANS(1);
 DEFINE_CPU_TRANS(2);
paddle/fluid/platform/CMakeLists.txt  (+1, -1)

-proto_library(profiler_proto SRCS profiler.proto)
+proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto)
 py_proto_compile(profiler_py_proto SRCS profiler.proto)
 add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
paddle/scripts/paddle_build.sh  (+1, -0)

@@ -504,6 +504,7 @@ function main() {
       ;;
     capi)
       cmake_gen ${PYTHON_ABI:-""}
       build
+      gen_capi_package
       ;;
     fluid_inference_lib)
python/paddle/fluid/tests/unittests/CMakeLists.txt  (+2, -0)

@@ -66,6 +66,7 @@ list(REMOVE_ITEM TEST_OPS test_fetch_var)
 list(REMOVE_ITEM TEST_OPS test_parallel_op)
 list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input)
 list(REMOVE_ITEM TEST_OPS test_dist_train)
+list(REMOVE_ITEM TEST_OPS test_network_with_dtype)

 # tests that can be bundled together in one python process for speed.
 if(WITH_FAST_BUNDLE_TEST)

@@ -83,6 +84,7 @@ py_test_modules(test_parallel_executor MODULES test_parallel_executor)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR})
 py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn)
 py_test_modules(test_mul_op MODULES test_mul_op)
+py_test_modules(test_network_with_dtype MODULES test_network_with_dtype)

 # tests that need to be run in separate process.
 py_test_modules(test_multihead_attention MODULES test_multihead_attention)
python/paddle/fluid/tests/unittests/test_network_with_dtype.py  (+15, -20)

@@ -24,33 +24,30 @@ BATCH_SIZE = 20

 class TestNetWithDtype(unittest.TestCase):
-    def set_network(self):
+    def setUp(self):
         self.dtype = "float64"
         self.init_dtype()
-        main = fluid.Program()
-        with fluid.program_guard(main):
-            self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
-            self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
-            y_predict = fluid.layers.fc(input=self.x, size=1, act=None)
-            cost = fluid.layers.square_error_cost(input=y_predict, label=self.y)
+
+    def run_net_on_place(self, place):
+        main = fluid.Program()
+        startup = fluid.Program()
+        with fluid.program_guard(main, startup):
+            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
+            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+            y_predict = fluid.layers.fc(input=x, size=1, act=None)
+            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
             avg_cost = fluid.layers.mean(cost)
-            self.program = main
-            self.fetch_list = [avg_cost]
             sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
             sgd_optimizer.minimize(avg_cost)

-    def run_net_on_place(self, place):
+        fetch_list = [avg_cost]
         train_reader = paddle.batch(
             paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
-        feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y])
+        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
         exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
+        exe.run(startup)
         for data in train_reader():
-            exe.run(self.program,
-                    feed=feeder.feed(data),
-                    fetch_list=self.fetch_list)
+            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
             # the main program is runable, the datatype is fully supported
             break

@@ -58,14 +55,12 @@ class TestNetWithDtype(unittest.TestCase):
         pass

     def test_cpu(self):
-        self.set_network()
         place = fluid.CPUPlace()
         self.run_net_on_place(place)

     def test_gpu(self):
         if not core.is_compiled_with_cuda():
             return
-        self.set_network()
         place = fluid.CUDAPlace(0)
         self.run_net_on_place(place)
python/paddle/fluid/tests/unittests/test_parallel_executor.py  (+2, -3)

@@ -775,7 +775,7 @@ class TestCRFModel(unittest.TestCase):
         build_strategy = fluid.BuildStrategy()
         build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
         self.check_network_convergence(
-            is_sparse=False, build_strategy=build_strategy)
+            is_sparse=True, build_strategy=build_strategy)

     def test_update_dense_parameter_reduce(self):
         build_strategy = fluid.BuildStrategy()

@@ -849,8 +849,7 @@ class TestFetchOp(unittest.TestCase):
             assert not math.isnan(np.sum(ret[i])) and \
                    not math.isinf(np.sum(ret[i]))

-    @unittest.skip("this test is buggy")
-    def test_feed(self):
+    def test_fetch_op(self):
         tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16)
         tst_reader_iter = tst_reader()