PaddlePaddle / Paddle

Commit d6f67f25 (unverified)
Authored Dec 08, 2017 by 武毅; committed via GitHub on Dec 08, 2017

Merge pull request #6409 from typhoonzero/change_release_version

Change release version

Parents: 36fcc95c, ac18580b
Showing 9 changed files with 200 additions and 7 deletions (+200 -7)
paddle/capi/Main.cpp  (+7 -0)
paddle/capi/Matrix.cpp  (+1 -1)
paddle/capi/error.cpp  (+32 -0)
paddle/capi/error.h  (+7 -0)
paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt  (+25 -4)
paddle/capi/examples/model_inference/multi_thread/main_gpu.c  (+113 -0)
paddle/capi/main.h  (+7 -0)
python/CMakeLists.txt  (+6 -0)
python/setup.py.in  (+2 -2)
paddle/capi/Main.cpp

@@ -43,4 +43,11 @@ paddle_error paddle_init(int argc, char** argv) {
   isInit = true;
   return kPD_NO_ERROR;
 }
+
+paddle_error paddle_init_thread() {
+  if (FLAGS_use_gpu) {
+    hl_init(FLAGS_gpu_id);
+  }
+  return kPD_NO_ERROR;
+}
 }
paddle/capi/Matrix.cpp

@@ -40,7 +40,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) {
 paddle_error paddle_matrix_set_row(paddle_matrix mat,
                                    uint64_t rowID,
                                    paddle_real* rowArray) {
-  if (mat == nullptr) return kPD_NULLPTR;
+  if (mat == nullptr || rowArray == nullptr) return kPD_NULLPTR;
   auto ptr = cast(mat);
   if (ptr->mat == nullptr) return kPD_NULLPTR;
   if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
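For reference, a minimal caller-side sketch (not part of this commit) of how the error codes returned by paddle_matrix_set_row might be handled. The 1x784 shape, the "--use_gpu=False" flag, and the fill_first_row helper are illustrative assumptions, not code from the repository.

#include <paddle/capi.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical caller: fill row 0 of a 1x784 CPU matrix and handle the
 * error codes paddle_matrix_set_row can return after this change. */
static int fill_first_row(void) {
  paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
                                           /* size */ 784,
                                           /* useGPU */ false);
  paddle_real row[784];
  for (int i = 0; i < 784; ++i) row[i] = 0;

  paddle_error err = paddle_matrix_set_row(mat, /* rowID */ 0, row);
  if (err == kPD_NULLPTR) {
    fprintf(stderr, "matrix or row buffer was NULL\n");
  } else if (err == kPD_OUT_OF_RANGE) {
    fprintf(stderr, "rowID exceeds the matrix height\n");
  }
  paddle_matrix_destroy(mat);
  return err == kPD_NO_ERROR ? 0 : -1;
}

int main() {
  char* argv[] = {"--use_gpu=False"};
  if (paddle_init(1, (char**)argv) != kPD_NO_ERROR) return 1;
  return fill_first_row();
}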
paddle/capi/error.cpp  (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "error.h"

const char* paddle_error_string(paddle_error err) {
  switch (err) {
    case kPD_NULLPTR:
      return "nullptr error";
    case kPD_OUT_OF_RANGE:
      return "out of range error";
    case kPD_PROTOBUF_ERROR:
      return "protobuf error";
    case kPD_NOT_SUPPORTED:
      return "not supported error";
    case kPD_UNDEFINED_ERROR:
      return "undefined error";
    default:
      return "";
  }
}
paddle/capi/error.h

@@ -15,6 +15,8 @@ limitations under the License. */
 #ifndef __PADDLE_CAPI_ERROR_H__
 #define __PADDLE_CAPI_ERROR_H__
+
 #include "config.h"
+
 /**
  * Error Type for Paddle API.
  */

@@ -27,4 +29,9 @@ typedef enum {
   kPD_UNDEFINED_ERROR = -1,
 } paddle_error;
 
+/**
+ * Error string for Paddle API.
+ */
+PD_API const char* paddle_error_string(paddle_error err);
+
 #endif
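The C API examples below call a CHECK macro defined in ../common/common.h, which this diff does not show. As a rough sketch, a caller could build a similar helper on top of the new paddle_error_string; the MY_CHECK name, the message format, and the exit-on-error behavior are assumptions, not the actual common.h implementation.

#include <paddle/capi.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical check macro: print a readable message using the new
 * paddle_error_string() and exit on any non-success error code. */
#define MY_CHECK(stmt)                                                    \
  do {                                                                    \
    paddle_error err_ = (stmt);                                           \
    if (err_ != kPD_NO_ERROR) {                                           \
      fprintf(stderr, "%s:%d: %s failed: %s\n", __FILE__, __LINE__,       \
              #stmt, paddle_error_string(err_));                          \
      exit(1);                                                            \
    }                                                                     \
  } while (0)

int main() {
  char* argv[] = {"--use_gpu=False"};  /* flag value assumed for a CPU run */
  MY_CHECK(paddle_init(1, (char**)argv));
  return 0;
}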
paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt

 project(multi_thread)
 cmake_minimum_required(VERSION 2.8)
-aux_source_directory(. SRC_LIST)
-add_executable(${PROJECT_NAME} ${SRC_LIST})
 find_package(Threads)
+
+if(NOT PADDLE_ROOT)
+  set(PADDLE_ROOT $ENV{PADDLE_ROOT} CACHE PATH "Paddle Path")
+endif()
+if(PADDLE_ROOT)
+  include_directories(${PADDLE_ROOT}/include)
+  link_directories(${PADDLE_ROOT}/lib)
+endif()
+
+set(CPU_SRCS main.c)
+add_executable(${PROJECT_NAME} ${CPU_SRCS})
 set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
 target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared
                       ${CMAKE_THREAD_LIBS_INIT})
+
+find_package(CUDA QUIET)
+if(CUDA_FOUND)
+  set(GPU_SRCS main_gpu.c)
+  cuda_add_executable(${PROJECT_NAME}_gpu ${GPU_SRCS})
+  set_property(TARGET ${PROJECT_NAME}_gpu PROPERTY C_STANDARD 99)
+  target_link_libraries(${PROJECT_NAME}_gpu
+                        -lpaddle_capi_shared
+                        ${CMAKE_THREAD_LIBS_INIT})
+endif(CUDA_FOUND)
paddle/capi/examples/model_inference/multi_thread/main_gpu.c  (new file, mode 100644)

#include <paddle/capi.h>
#include <pthread.h>
#include <time.h>
#include "../common/common.h"

#define CONFIG_BIN "./trainer_config.bin"
#define NUM_THREAD 4
#define NUM_ITER 1000

pthread_mutex_t mutex;

/*
 * @brief A simple inference example that runs multiple threads on one GPU.
 *        Each thread holds its own local gradient_machine but shares the same
 *        parameters.
 *        If you want to run on different GPUs, you need to launch
 *        multiple processes or set trainer_count > 1.
 */
void* thread_main(void* gm_ptr) {
  // Initialize the thread environment of Paddle.
  CHECK(paddle_init_thread());
  paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr);
  // Create input arguments.
  paddle_arguments in_args = paddle_arguments_create_none();
  // Create input matrix.
  paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
                                           /* size */ 784,
                                           /* useGPU */ true);
  // Create output arguments.
  paddle_arguments out_args = paddle_arguments_create_none();
  // Create output matrix.
  paddle_matrix prob = paddle_matrix_create_none();

  // CPU buffers to cache the input and output.
  paddle_real* cpu_input = (paddle_real*)malloc(784 * sizeof(paddle_real));
  paddle_real* cpu_output = (paddle_real*)malloc(10 * sizeof(paddle_real));

  for (int iter = 0; iter < NUM_ITER; ++iter) {
    // There is only one input layer of this network.
    CHECK(paddle_arguments_resize(in_args, 1));
    CHECK(paddle_arguments_set_value(in_args, 0, mat));

    for (int i = 0; i < 784; ++i) {
      cpu_input[i] = rand() / ((float)RAND_MAX);
    }
    CHECK(paddle_matrix_set_value(mat, cpu_input));

    CHECK(paddle_gradient_machine_forward(machine,
                                          in_args,
                                          out_args,
                                          /* isTrain */ false));

    CHECK(paddle_arguments_get_value(out_args, 0, prob));
    CHECK(paddle_matrix_get_value(prob, cpu_output));

    pthread_mutex_lock(&mutex);
    printf("Prob: ");
    for (int i = 0; i < 10; ++i) {
      printf("%.2f ", cpu_output[i]);
    }
    printf("\n");
    pthread_mutex_unlock(&mutex);
  }

  CHECK(paddle_matrix_destroy(prob));
  CHECK(paddle_arguments_destroy(out_args));
  CHECK(paddle_matrix_destroy(mat));
  CHECK(paddle_arguments_destroy(in_args));
  CHECK(paddle_gradient_machine_destroy(machine));

  free(cpu_input);
  free(cpu_output);

  return NULL;
}

int main() {
  // Initialize Paddle.
  char* argv[] = {"--use_gpu=True"};
  CHECK(paddle_init(1, (char**)argv));

  // Read the config binary file. It is generated by `convert_protobin.sh`.
  long size;
  void* buf = read_config(CONFIG_BIN, &size);

  // Create a gradient machine for inference.
  paddle_gradient_machine machine;
  CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
  CHECK(paddle_gradient_machine_randomize_param(machine));

  // Loading parameters. Uncomment the following line and change the directory.
  // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
  //                                                "./some_where_to_params"));
  srand(time(0));
  pthread_mutex_init(&mutex, NULL);

  pthread_t threads[NUM_THREAD];

  for (int i = 0; i < NUM_THREAD; ++i) {
    paddle_gradient_machine thread_local_machine;
    CHECK(paddle_gradient_machine_create_shared_param(
        machine, buf, size, &thread_local_machine));
    pthread_create(&threads[i], NULL, thread_main, thread_local_machine);
  }

  for (int i = 0; i < NUM_THREAD; ++i) {
    pthread_join(threads[i], NULL);
  }

  pthread_mutex_destroy(&mutex);

  return 0;
}
paddle/capi/main.h

@@ -26,6 +26,13 @@ extern "C" {
  */
 PD_API paddle_error paddle_init(int argc, char** argv);
 
+/**
+ * Initialize the thread environment of Paddle.
+ * @note it is requisite for GPU runs but optional for CPU runs.
+ *       For GPU runs, all threads will run on the same GPU devices.
+ */
+PD_API paddle_error paddle_init_thread();
+
 #ifdef __cplusplus
 }
 #endif
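Taken together with the Main.cpp change above, the documented call pattern appears to be: call paddle_init() once from the main thread, then call paddle_init_thread() at the start of every worker thread that touches the GPU. A minimal sketch of that pattern follows; the worker body is omitted and the single-thread setup is only illustrative (main_gpu.c above is the full example).

#include <paddle/capi.h>
#include <pthread.h>

/* Hypothetical worker: per the @note in main.h, each GPU-using thread calls
 * paddle_init_thread() before any other Paddle C API call. */
static void* worker(void* arg) {
  (void)arg;
  if (paddle_init_thread() != kPD_NO_ERROR) return NULL;
  /* ... per-thread inference work, as in main_gpu.c ... */
  return NULL;
}

int main() {
  /* paddle_init() is called exactly once, from the main thread. */
  char* argv[] = {"--use_gpu=True"};
  if (paddle_init(1, (char**)argv) != kPD_NO_ERROR) return 1;

  pthread_t tid;
  pthread_create(&tid, NULL, worker, NULL);
  pthread_join(tid, NULL);
  return 0;
}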
python/CMakeLists.txt

@@ -33,6 +33,12 @@ if(WITH_MKLDNN)
   list(APPEND MKL_DEPENDS mkldnn)
 endif()
 
+if(WITH_GPU)
+  SET(PACKAGE_NAME "paddlepaddle-gpu")
+else()
+  SET(PACKAGE_NAME "paddlepaddle")
+endif()
+
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
                ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
python/setup.py.in

@@ -5,7 +5,7 @@ class BinaryDistribution(Distribution):
         return True
 
 MAJOR = 0
-MINOR = 10
+MINOR = 11
 PATCH = 0
 RC = 0
 ISTAGED = False

@@ -89,7 +89,7 @@ paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
 if '${MKL_SHARED_LIBS}'!= '':
     paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';')
 
-setup(name='paddlepaddle',
+setup(name='${PACKAGE_NAME}',
       version='${PADDLE_VERSION}',
       description='Parallel Distributed Deep Learning',
       install_requires=setup_requires,