Commit c4535732 in Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Authored on June 14, 2018 by tensor-tang

Merge remote-tracking branch 'ups/develop' into fix

Parents: 85c3bfc1, 9169b3b8
Showing 9 changed files with 50 additions and 12 deletions (+50 -12)
Changed files:

- cmake/external/openblas.cmake (+2 -0)
- paddle/fluid/framework/parallel_executor.cc (+15 -5)
- paddle/fluid/inference/io.cc (+4 -0)
- paddle/fluid/operators/detail/grpc_server.cc (+2 -1)
- paddle/fluid/operators/math/blas.h (+15 -0)
- paddle/fluid/operators/math/math_function.h (+2 -0)
- paddle/fluid/platform/nccl_helper.h (+5 -0)
- paddle/math/MathFunctions.h (+4 -5)
- python/paddle/fluid/layers/nn.py (+1 -1)
cmake/external/openblas.cmake (+2 -0)

```cmake
@@ -29,6 +29,8 @@ IF(NOT ${CBLAS_FOUND})
        "${CBLAS_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas${CMAKE_STATIC_LIBRARY_SUFFIX}"
        CACHE FILEPATH "openblas library." FORCE)
    ADD_DEFINITIONS(-DPADDLE_USE_OPENBLAS)
    SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -Wno-unused-but-set-variable -Wno-unused-variable")
    SET(OPENBLAS_COMMIT "v0.2.20")
```
paddle/fluid/framework/parallel_executor.cc (+15 -5)

```cpp
@@ -145,9 +145,9 @@ void ParallelExecutor::BCastParamsToGPUs(
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#ifdef PADDLE_WITH_CUDA
      std::vector<void *> buffers;
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      platform::NCCLGroupGuard guard;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
```

```cpp
@@ -159,11 +159,21 @@ void ParallelExecutor::BCastParamsToGPUs(
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                     nccl_ctx.comm_, nccl_ctx.stream());
        buffers.push_back(buffer);
      }
      member_->nccl_ctxs_->WaitAll();

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
```
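Read together with the NCCLGroupGuard note in nccl_helper.h below, the intent of this hunk is to prepare every per-device buffer first and only then issue all ncclBcast calls inside one grouped section, synchronizing the streams afterwards. The following is a minimal sketch of that two-phase pattern, not Paddle's API; the function and parameter names (BroadcastToAll, comms, streams) are invented for illustration.

```cpp
#include <cstddef>
#include <vector>

#include <cuda_runtime.h>
#include <nccl.h>

// Broadcast one parameter buffer from device 0 to all devices. buffers[i]
// must already be allocated on device i (phase 1), so nothing between
// ncclGroupStart() and ncclGroupEnd() can stall on allocation.
void BroadcastToAll(const std::vector<void *> &buffers, size_t numel,
                    ncclDataType_t dtype, const std::vector<ncclComm_t> &comms,
                    const std::vector<cudaStream_t> &streams) {
  ncclGroupStart();
  for (size_t i = 0; i < comms.size(); ++i) {
    ncclBcast(buffers[i], numel, dtype, /*root=*/0, comms[i], streams[i]);
  }
  ncclGroupEnd();
  // Phase 2: wait on every stream, mirroring member_->nccl_ctxs_->WaitAll().
  for (auto stream : streams) {
    cudaStreamSynchronize(stream);
  }
}
```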
paddle/fluid/inference/io.cc (+4 -0)

```cpp
@@ -20,16 +20,20 @@ limitations under the License. */
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/pybind/pybind.h"

DEFINE_string(devices, "", "The devices to be used which is joined by comma.");
DEFINE_bool(init_p2p, false, "Whether to init p2p.");
DEFINE_int32(math_num_threads, 1,
             "Number of threads used to run math functions.");

namespace paddle {
namespace inference {

void Init(const std::vector<std::string> argv) {
  framework::InitGflags(argv);
  operators::math::SetNumThreads(FLAGS_math_num_threads);
  // init devices
  std::vector<int> devices;
  std::string token;
```
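The new --math_num_threads flag (default 1) is read in Init and forwarded to operators::math::SetNumThreads, which is added to blas.h below. A hedged usage sketch follows, assuming the flags can be passed as gflags-style strings to paddle::inference::Init and that framework::InitGflags handles the program-name argument internally.

```cpp
#include <string>
#include <vector>

#include "paddle/fluid/inference/io.h"

int main() {
  // Illustrative flag values: --math_num_threads caps the OpenBLAS/MKL
  // thread count used by math kernels during inference.
  std::vector<std::string> flags = {"--devices=0", "--math_num_threads=4"};
  paddle::inference::Init(flags);
  // ... load the inference program and run it ...
  return 0;
}
```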
paddle/fluid/operators/detail/grpc_server.cc (+2 -1)

```cpp
@@ -169,7 +169,8 @@ class RequestPrefetch final : public RequestBase {
    auto scope = request_->GetMutableLocalScope();
    auto invar = scope->FindVar(in_var_name);
    framework::Variable* outvar = scope->FindVar(out_var_name);
    // out var must be created in local scope!
    framework::Variable* outvar = scope->Var(out_var_name);

    request_handler_->Handle(in_var_name, scope, invar, &outvar, out_var_name);
```
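The net effect of this hunk (+2 -1) is that RequestPrefetch now creates the output variable in the request's local scope with scope->Var(out_var_name) instead of looking it up with scope->FindVar(out_var_name), as the new comment states.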
paddle/fluid/operators/math/blas.h (+15 -0)

```cpp
@@ -20,13 +20,16 @@
#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_service.h>
#include <mkl_vml_functions.h>
#endif

#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#ifdef LAPACK_FOUND
#include <lapacke.h>
#endif
#endif

#ifndef LAPACK_FOUND
extern "C" {
```

```cpp
@@ -46,6 +49,18 @@ namespace paddle {
namespace operators {
namespace math {

static void SetNumThreads(int num_threads) {
#ifdef PADDLE_USE_OPENBLAS
  int real_num_threads = num_threads > 1 ? num_threads : 1;
  openblas_set_num_threads(real_num_threads);
#elif defined(PADDLE_WITH_MKLML)
  int real_num_threads = num_threads > 1 ? num_threads : 1;
  mkl_set_num_threads(real_num_threads);
#else
  PADDLE_ENFORCE(false, "To be implemented.");
#endif
}

/**
 * Matrix Descriptor of a memory buffer.
 *
```
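SetNumThreads clamps requests below one to a single thread and dispatches to openblas_set_num_threads or mkl_set_num_threads depending on the build flags. A short hedged sketch of calling it before running CPU math kernels; the wrapper ConfigureCpuMath is illustrative and not part of Paddle.

```cpp
#include "paddle/fluid/operators/math/blas.h"

// Cap the math-library thread pool before launching CPU kernels.
// Requires a build with PADDLE_USE_OPENBLAS or PADDLE_WITH_MKLML; otherwise
// SetNumThreads hits the PADDLE_ENFORCE(false, "To be implemented.") branch.
void ConfigureCpuMath(int requested_threads) {
  // Values of 1 or less fall back to a single thread inside SetNumThreads.
  paddle::operators::math::SetNumThreads(requested_threads);
}
```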
paddle/fluid/operators/math/math_function.h (+2 -0)

```cpp
@@ -21,8 +21,10 @@ limitations under the License. */
#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#ifdef LAPACK_FOUND
#include <lapacke.h>
#endif
#endif

#ifndef LAPACK_FOUND
extern "C" {
```
paddle/fluid/platform/nccl_helper.h (+5 -0)

```cpp
@@ -41,6 +41,11 @@ inline ncclDataType_t ToNCCLDataType(std::type_index type) {
  }
}

// NOTE(minqiyang): according to the ncclGroupEnd documentations:
// https://docs.nvidia.com/deeplearning/sdk/nccl-api/ncclapidoc.html,
// ncclGroupEnd will wait for all communicators to be initialized, which will
// cause blocking problem when a runtime_error was thrown, so try only guard
// NCCL actions when use it.
class NCCLGroupGuard {
 public:
  static std::mutex &NCCLMutex() {
```
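Only the beginning of NCCLGroupGuard is visible in this hunk. As a rough mental model (a sketch under assumptions, not Paddle's exact implementation), such an RAII guard typically serializes group submission with a process-wide mutex and pairs ncclGroupStart()/ncclGroupEnd() with its lifetime:

```cpp
#include <mutex>

#include <nccl.h>

// Illustrative RAII guard: lock a shared mutex and open an NCCL group on
// construction, close the group and unlock on destruction. Error handling
// (checking the ncclResult_t return values) is omitted for brevity.
class GroupGuardSketch {
 public:
  static std::mutex &Mutex() {
    static std::mutex m;
    return m;
  }

  GroupGuardSketch() {
    Mutex().lock();
    ncclGroupStart();
  }

  ~GroupGuardSketch() {
    ncclGroupEnd();
    Mutex().unlock();
  }
};
```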
paddle/math/MathFunctions.h (+4 -5)

```cpp
@@ -12,8 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef MATHFUNCTIONS_H_
#define MATHFUNCTIONS_H_
#pragma once

#ifdef PADDLE_WITH_MKLML
#include <mkl_cblas.h>
```

```cpp
@@ -21,7 +20,7 @@ limitations under the License. */
#include <mkl_vml_functions.h>
#endif

#if defined(PADDLE_USE_VECLIB)
#ifdef PADDLE_USE_VECLIB
extern "C" {
#include <cblas.h>
#include <clapack.h>
```

```cpp
@@ -30,8 +29,10 @@ extern "C" {
#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#ifdef LAPACK_FOUND
#include <lapacke.h>
#endif
#endif

#ifndef LAPACK_FOUND
extern "C" {
```

```cpp
@@ -126,5 +127,3 @@ template <class T>
void vTanh(const int n, const T* a, T* r);

}  // namespace paddle

#endif  // MATHFUNCTIONS_H_
```
python/paddle/fluid/layers/nn.py (+1 -1)

```python
@@ -4242,7 +4242,7 @@ def random_crop(x, shape, seed=None):
    seed_out = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="random_crop",
        inputs={"X": input,
        inputs={"X": x,
                "Seed": seed},
        outputs={"Out": out,
                 "SeedOut": seed_out},
```
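This one-line fix makes append_op feed the function's x argument as the "X" input of the random_crop op; the previous line referenced input, which is not a parameter of random_crop(x, shape, seed=None).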