PaddlePaddle / Paddle
Commit 59bb29db (unverified)
Authored Feb 11, 2020 by Wilber; committed via GitHub on Feb 11, 2020
update. test=develop test=release/1.7 (#22518)
[cherry-pick] #22484 Support compiling without depending on NCCL. With multiple GPUs, if the build is not compiled with the WITH_NCCL switch enabled, only a single GPU can be used.
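The recurring change in this commit is to guard every NCCL-dependent code path with the new PADDLE_WITH_NCCL macro instead of the earlier "CUDA and not Windows" test. As a rough illustration only (the function name AllReduceOrFail and its error message are hypothetical, not Paddle's actual API), code built without WITH_NCCL keeps compiling and simply rejects multi-GPU work:

// Minimal sketch of the guard pattern used throughout this commit.
// AllReduceOrFail and the message text are illustrative, not Paddle code.
#include <stdexcept>

#if defined(PADDLE_WITH_NCCL)
#include <nccl.h>
#endif

void AllReduceOrFail(int num_devices) {
#if defined(PADDLE_WITH_NCCL)
  // NCCL is available: multi-GPU collectives may be issued here
  // (real code would obtain an ncclComm_t per device and call ncclAllReduce).
  (void)num_devices;
#else
  // Built without WITH_NCCL: only a single GPU is supported.
  if (num_devices > 1) {
    throw std::runtime_error(
        "Compiled without NCCL; multi-GPU execution is unavailable.");
  }
#endif
}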
Parent: 14737e19
Showing 15 changed files with 31 additions and 30 deletions (+31, -30)
paddle/fluid/operators/collective/c_gen_nccl_id_op.cc (+2, -2)
paddle/fluid/operators/collective/c_reducescatter_op.cu.cc (+2, -2)
paddle/fluid/operators/collective/c_sync_calc_stream_op.cc (+2, -2)
paddle/fluid/operators/collective/c_sync_comm_stream_op.cc (+3, -3)
paddle/fluid/operators/distributed/brpc/brpc_sendrecvop_utils.cc (+3, -2)
paddle/fluid/operators/distributed/grpc/grpc_serde.cc (+3, -3)
paddle/fluid/operators/distributed/grpc/grpc_variable_response.cc (+1, -1)
paddle/fluid/operators/distributed/sendrecvop_utils.cc (+1, -1)
paddle/fluid/operators/distributed_ops/allreduce_op.h (+2, -2)
paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc (+2, -2)
paddle/fluid/platform/collective_helper.cc (+1, -1)
paddle/fluid/platform/collective_helper.h (+1, -1)
paddle/fluid/platform/enforce.h (+3, -3)
paddle/fluid/platform/nccl_helper.h (+1, -1)
paddle/fluid/pybind/pybind.cc (+4, -4)
paddle/fluid/operators/collective/c_gen_nccl_id_op.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include <nccl.h>
 #endif

@@ -27,7 +27,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/distributed/distributed.h"
 #include "paddle/fluid/operators/distributed/request_handler_impl.h"
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/nccl_helper.h"
 #endif
paddle/fluid/operators/collective/c_reducescatter_op.cu.cc

@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/collective/c_reducescatter_op.h"
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/collective_helper.h"
 #include "paddle/fluid/platform/nccl_helper.h"
 #endif

@@ -26,7 +26,7 @@ template <typename T>
 class CReduceScatterOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
     auto in = ctx.Input<framework::Tensor>("X");
     auto out = ctx.Output<framework::Tensor>("Out");
paddle/fluid/operators/collective/c_sync_calc_stream_op.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include <nccl.h>
 #endif

@@ -21,7 +21,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/collective_helper.h"
 #endif
paddle/fluid/operators/collective/c_sync_comm_stream_op.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include <nccl.h>
 #endif

@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/collective_helper.h"
 #include "paddle/fluid/platform/nccl_helper.h"
 #endif

@@ -41,7 +41,7 @@ class CSyncCommStreamOp : public framework::OperatorBase {
     PADDLE_ENFORCE_EQ(is_gpu_place(place), true,
                       "Sync stream op can run on gpu place only for now.");
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
     int ring_id = Attr<int>("ring_id");
     auto stream =
         platform::NCCLCommContext::Instance().Get(ring_id, place)->stream();
paddle/fluid/operators/distributed/brpc/brpc_sendrecvop_utils.cc

@@ -12,11 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
 #include <nccl.h>
 #endif
 #include <sys/time.h>
 #include <limits>
+#include <memory>
 #include <thread>  // NOLINT
 #include "paddle/fluid/framework/data_type.h"

@@ -139,7 +140,7 @@ void SerializeToIOBuf(const std::string& name, framework::Variable* var,
   } else if (var->IsType<framework::SelectedRows>()) {
     request->set_type(::sendrecv::SELECTED_ROWS);
     payload.reset(new TensorPayload(GetSelectedRowsPayload(var, ctx, request)));
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
   } else if (var->IsType<ncclUniqueId>()) {
     request->set_type(::sendrecv::NCCL_ID);
     const ncclUniqueId& uid = var->Get<ncclUniqueId>();
paddle/fluid/operators/distributed/grpc/grpc_serde.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
 #include <nccl.h>
 #endif
 #include <limits>

@@ -68,7 +68,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
   } else if (var->IsType<framework::SelectedRows>()) {
     request.set_type(::sendrecv::SELECTED_ROWS);
     payload = new TensorPayload(GetSelectedRowsPayload(var, ctx, &request));
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
   } else if (var->IsType<ncclUniqueId>()) {
     request.set_type(::sendrecv::NCCL_ID);
 #endif

@@ -85,7 +85,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
   e.WriteRawBytes(std::string(header.data(), header.size()));
 // NCCLID is copied directly to the message, return bytebuffer
 // with only one slice if serializing NCCLID.
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
   if (var->IsType<ncclUniqueId>()) {
     e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber,
                               NCCL_UNIQUE_ID_BYTES);
paddle/fluid/operators/distributed/grpc/grpc_variable_response.cc

@@ -15,7 +15,7 @@
 #include <string>
 #include <utility>
 #include <vector>
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
 #include <nccl.h>
 #endif
paddle/fluid/operators/distributed/sendrecvop_utils.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifdef PADDLE_WITH_CUDA
+#ifdef PADDLE_WITH_NCCL
 #include <nccl.h>
 #endif
 #include <memory>
paddle/fluid/operators/distributed_ops/allreduce_op.h

@@ -21,7 +21,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/nccl_helper.h"
 #endif

@@ -35,7 +35,7 @@ class AllReduceOpKernel : public framework::OpKernel<T> {
     auto place = ctx.GetPlace();
     PADDLE_ENFORCE(is_gpu_place(place),
                    "AllReduce op can run on gpu place only for now.");
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto in = ctx.Input<framework::Tensor>("X");
     auto out = ctx.Output<framework::Tensor>("Out");
paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc

@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/nccl_helper.h"
 #endif

@@ -37,7 +37,7 @@ class NCCLBroadcastOpKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                    "The place of ExecutionContext should be CUDAPlace.");
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
     int dev_id = boost::get<platform::CUDAPlace>(ctx.GetPlace()).device;
     int root_dev_id = ctx.Attr<int>("root");
paddle/fluid/platform/collective_helper.cc

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/collective_helper.h"
 #include <memory>
paddle/fluid/platform/collective_helper.h

@@ -14,7 +14,7 @@
 #pragma once
-#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
+#if defined(PADDLE_WITH_NCCL)
 #include <map>
 #include <memory>
 #include <string>
paddle/fluid/platform/enforce.h

@@ -48,7 +48,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/dynload/cublas.h"
 #include "paddle/fluid/platform/dynload/cudnn.h"
 #include "paddle/fluid/platform/dynload/curand.h"
-#if !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
 #include "paddle/fluid/platform/dynload/nccl.h"
 #endif  // __APPLE__
 #endif  // PADDLE_WITH_CUDA

@@ -462,7 +462,7 @@ inline void throw_on_error(cublasStatus_t stat, const std::string& msg) {
 #endif
 }
-#if !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
 inline bool is_error(ncclResult_t nccl_result) {
   return nccl_result != ncclSuccess;
 }

@@ -502,7 +502,7 @@ DEFINE_CUDA_STATUS_TYPE(curandStatus_t, CURAND_STATUS_SUCCESS);
 DEFINE_CUDA_STATUS_TYPE(cudnnStatus_t, CUDNN_STATUS_SUCCESS);
 DEFINE_CUDA_STATUS_TYPE(cublasStatus_t, CUBLAS_STATUS_SUCCESS);
-#if !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
 DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess);
 #endif
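For context, the helpers re-guarded above follow a simple convention: each status type gets an is_error() overload, which a generic checker can then use so ncclResult_t is treated like the other CUDA status types. A minimal standalone sketch of that idea only (CheckNccl is a hypothetical name; Paddle's real enforce macros are considerably more elaborate):

// Illustrative sketch, not Paddle's actual enforce machinery.
#if defined(PADDLE_WITH_NCCL)
#include <nccl.h>
#include <stdexcept>
#include <string>

inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

// Hypothetical helper: uses the is_error overload for a uniform success check.
inline void CheckNccl(ncclResult_t result, const char* what) {
  if (is_error(result)) {
    throw std::runtime_error(std::string("NCCL call failed: ") + what);
  }
}
#endif  // PADDLE_WITH_NCCL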
paddle/fluid/platform/nccl_helper.h

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#ifndef _WIN32
+#ifdef PADDLE_WITH_NCCL
 #pragma once
 #include <stdio.h>
paddle/fluid/pybind/pybind.cc

@@ -68,7 +68,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/ir.h"
 #include "paddle/fluid/pybind/pybind_boost_headers.h"
-#ifndef _WIN32
+#ifdef PADDLE_WITH_NCCL
 #include "paddle/fluid/pybind/nccl_wrapper_py.h"
 #endif
 #include "paddle/fluid/framework/data_type.h"

@@ -78,7 +78,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/tensor_py.h"
 #include "paddle/fluid/string/to_string.h"
 #ifdef PADDLE_WITH_CUDA
-#ifndef _WIN32
+#ifdef PADDLE_WITH_NCCL
 #include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
 #endif
 #include "paddle/fluid/platform/cuda_profiler.h"

@@ -926,7 +926,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def("get_lod_tensor_array",
            [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
            py::return_value_policy::reference)
-#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
+#if (defined(PADDLE_WITH_NCCL))
       .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
              return self.GetMutable<platform::Communicator>();

@@ -1174,7 +1174,7 @@ All parameter, weight, gradient are variables in Paddle.
 #endif
       });;
 // clang-format on
-#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
+#if defined(PADDLE_WITH_NCCL)
   py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
 #endif
   py::class_<platform::CUDAPlace>(m, "CUDAPlace", R"DOC(