Commit a4f7696a
Authored Sep 27, 2018 by typhoonzero
Parent: 85362e98

Revert "Some trivial optimization (#13530)"

This reverts commit 1d91a49d.

Showing 10 changed files with 44 additions and 116 deletions (+44, -116)
paddle/fluid/framework/op_info.h                 +6   -11
paddle/fluid/operators/read_op.cc                +0   -2
paddle/fluid/operators/sgd_op.cu                 +20  -21
paddle/fluid/operators/shrink_rnn_memory_op.cc   +8   -21
paddle/fluid/platform/device_context.cc          +0   -5
paddle/fluid/platform/device_context.h           +0   -5
paddle/fluid/platform/for_range.h                +10  -29
paddle/fluid/platform/gpu_info.cc                +0   -17
paddle/fluid/platform/gpu_info.h                 +0   -3
python/paddle/fluid/layers/io.py                 +0   -2
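In summary, the revert: drops the op_type_ back-reference that the optimization had threaded through OpInfo error messages; restores the one-block-per-row launch of the sparse SGD CUDA kernel in place of a grid-stride loop sized from the device's physical thread count; removes the max-grid-dimension plumbing (GpuMaxGridDim, CUDADeviceContext::GetMaxGridDims, the cached grid_max_dims_) along with the large-grid fallback in ForRange; folds shrink_rnn_memory_op's single-level LoD fast path back into the general path; and stops propagating LoD levels through reader variables on both the C++ and Python sides.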
paddle/fluid/framework/op_info.h

@@ -38,31 +38,27 @@ struct OpInfo {
   OpAttrChecker* checker_{nullptr};
   InferVarTypeFN infer_var_type_;
   InferShapeFN infer_shape_;
-  std::string op_type_;
 
   bool HasOpProtoAndChecker() const {
     return proto_ != nullptr && checker_ != nullptr;
   }
 
   const proto::OpProto& Proto() const {
-    PADDLE_ENFORCE_NOT_NULL(proto_, "Operator %s Proto has not been registered",
-                            op_type_);
+    PADDLE_ENFORCE_NOT_NULL(proto_, "Operator Proto has not been registered");
     PADDLE_ENFORCE(proto_->IsInitialized(),
-                   "Operator %s Proto must be initialized in op info",
-                   op_type_);
+                   "Operator Proto must be initialized in op info");
     return *proto_;
   }
 
   const OpCreator& Creator() const {
     PADDLE_ENFORCE_NOT_NULL(creator_,
-                            "Operator %s Creator has not been registered",
-                            op_type_);
+                            "Operator Creator has not been registered");
     return creator_;
   }
 
   const GradOpMakerFN& GradOpMaker() const {
     PADDLE_ENFORCE_NOT_NULL(grad_op_maker_,
-                            "Operator %s GradOpMaker has not been registered.",
-                            op_type_);
+                            "Operator GradOpMaker has not been registered.");
     return grad_op_maker_;
   }

@@ -77,9 +73,8 @@ class OpInfoMap {
     return map_.find(op_type) != map_.end();
   }
 
-  void Insert(const std::string& type, OpInfo info) {
+  void Insert(const std::string& type, const OpInfo& info) {
     PADDLE_ENFORCE(!Has(type), "Operator %s has been registered", type);
-    info.op_type_ = type;
     map_.insert({type, info});
   }
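The Insert signature is the substantive change in the second hunk: the version being reverted took OpInfo by value so the map could stamp the registered type into its local copy (info.op_type_ = type) before storing it, which is what fed the "%s" placeholders in the error messages above. A minimal standalone sketch of the two styles, using hypothetical Record and Registry names rather than Paddle's types:

#include <map>
#include <string>

struct Record {
  std::string key_;  // back-reference used for error messages
};

struct Registry {
  // By value: the local copy can be mutated before it is stored.
  void InsertByValue(const std::string& key, Record rec) {
    rec.key_ = key;  // stamp the key into the stored copy
    map_.insert({key, rec});
  }
  // By const reference (the restored style): no mutation, no key_ member needed.
  void InsertByRef(const std::string& key, const Record& rec) {
    map_.insert({key, rec});
  }
  std::map<std::string, Record> map_;
};

Since std::map stores its own copy either way, the by-value overload costs at most one extra copy; the apparent point of the reverted change was richer error messages, not speed.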
paddle/fluid/operators/read_op.cc

@@ -45,12 +45,10 @@ class ReadInferVarType : public framework::VarTypeInference {
     framework::VarDesc* reader = block->FindVarRecursive(reader_name);
     auto dtypes = reader->GetDataTypes();
     PADDLE_ENFORCE_EQ(dtypes.size(), out_names.size());
-    auto lod_levels = reader->GetLoDLevels();
     for (size_t i = 0; i < dtypes.size(); ++i) {
       framework::VarDesc& out = block->FindRecursiveOrCreateVar(out_names[i]);
       out.SetType(framework::proto::VarType::LOD_TENSOR);
       out.SetDataType(dtypes[i]);
-      out.SetLoDLevel(lod_levels[i]);
     }
   }
 };
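With GetLoDLevels() gone, ReadInferVarType still sets each output's type and dtype from the reader variable but no longer copies its LoD levels; the matching Python-side removals appear in python/paddle/fluid/layers/io.py at the end of this patch.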
paddle/fluid/operators/sgd_op.cu

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <algorithm>
+#define EIGEN_USE_GPU
 #include "paddle/fluid/operators/sgd_op.h"
 #include "paddle/fluid/platform/cuda_primitives.h"

@@ -33,21 +33,22 @@ __global__ void SGDKernel(const T* g, const T* p, const T* learning_rate,
   }
 }
 
-template <typename T>
+template <typename T, int block_size>
 __global__ void SparseSGDFunctorKernel(const T* selected_rows,
                                        const int64_t* rows,
                                        const T* learning_rate, T* tensor_out,
-                                       int64_t row_numel, int64_t limit) {
-  for (int64_t i = blockIdx.x; i < limit; i += gridDim.x) {
-    const T* selected_rows_ptr = selected_rows + i * row_numel;
-    T* tensor_out_ptr = tensor_out + rows[i] * row_numel;
-    for (int64_t index = threadIdx.x; index < row_numel; index += blockDim.x) {
-      // Since index in rows of SelectedRows can be duplicate, we have to use
-      // Atomic Operation to avoid concurrent write error.
-      paddle::platform::CudaAtomicAdd(
-          tensor_out_ptr + index,
-          -1.0 * learning_rate[0] * selected_rows_ptr[index]);
-    }
+                                       int64_t row_numel) {
+  const int ty = blockIdx.y;
+  int tid = threadIdx.x;
+
+  selected_rows += ty * row_numel;
+  tensor_out += rows[ty] * row_numel;
+
+  for (int index = tid; index < row_numel; index += block_size) {
+    // Since index in rows of SelectedRows can be duplicate, we have to use
+    // Atomic Operation to avoid concurrent write error.
+    paddle::platform::CudaAtomicAdd(
+        tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]);
   }
 }
 }  // namespace

@@ -96,15 +97,13 @@ class SGDOpCUDAKernel : public framework::OpKernel<T> {
       auto* in_data = in_value.data<T>();
       auto* out_data = param_out->data<T>();
 
-      const int kThreadsPerBlock = 256;
-      int thread_x = kThreadsPerBlock;
-      int max_threads = ctx.cuda_device_context().GetMaxPhysicalThreadCount();
-      int max_blocks = std::max(max_threads / kThreadsPerBlock, 1);
-      SparseSGDFunctorKernel<<<max_blocks, thread_x, 0,
-                               ctx.cuda_device_context().stream()>>>(
+      const int block_size = 256;
+      dim3 threads(block_size, 1);
+      dim3 grid(1, in_rows.size());
+      SparseSGDFunctorKernel<
+          T, 256><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>(
           in_data, in_rows.CUDAData(ctx.GetPlace()), learning_rate->data<T>(),
-          out_data, in_row_numel, in_rows.size());
+          out_data, in_row_numel);
     } else {
       PADDLE_THROW("Unsupported Variable Type of Grad");
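The two kernel versions differ mainly in launch strategy. The removed version launched a bounded 1-D grid (max_blocks derived from GetMaxPhysicalThreadCount) and let blocks walk the rows with a grid-stride loop; the restored version launches exactly one block per row via a (1, in_rows.size()) grid and picks the row from blockIdx.y. A minimal standalone sketch of the grid-stride row loop that the revert removes (illustrative names, not Paddle's):

// Each block starts at row blockIdx.x and advances by gridDim.x, so a
// fixed-size grid covers any number of rows; threads within a block then
// stride across the row's columns.
template <typename T>
__global__ void RowUpdateGridStride(const T* src, T* dst, int64_t row_numel,
                                    int64_t num_rows) {
  for (int64_t row = blockIdx.x; row < num_rows; row += gridDim.x) {
    for (int64_t col = threadIdx.x; col < row_numel; col += blockDim.x) {
      dst[row * row_numel + col] += src[row * row_numel + col];
    }
  }
}

The restored launch instead requires in_rows.size() to fit within the grid's Y dimension (65535 on current devices), which is the cap the grid-stride form had lifted.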
paddle/fluid/operators/shrink_rnn_memory_op.cc

@@ -52,26 +52,16 @@ class ShrinkRNNMemoryOp : public ArrayOp {
     size_t height = dst_num_rows;
 
     // do shrink for the top level LoD
     if (x_tensor.lod().size() > 0 &&
         x_tensor.lod()[0].size() > static_cast<size_t>(dst_num_rows)) {
-      if (x_tensor.lod().size() > 1) {  // MultiLevel LoD
-        auto lod_offset = framework::GetSubLoDAndAbsoluteOffset(
-            x_tensor.lod(), 0, dst_num_rows, 0);
-        height = lod_offset.second.second;
-        auto out_lod = out_tensor.mutable_lod();
-        framework::AppendLoD(out_lod, lod_offset.first);
-      } else {
-        // Shrink LoD
-        auto lod_item = x_tensor.lod()[0];
-        lod_item.resize(dst_num_rows + 1);
-        out_tensor.set_lod({lod_item});
-        const auto& const_lod_item = lod_item;
-        height = const_lod_item.back();
-      }
+      auto lod_offset = framework::GetSubLoDAndAbsoluteOffset(
+          x_tensor.lod(), 0, dst_num_rows, 0);
+      height = lod_offset.second.second;
+      auto out_lod = out_tensor.mutable_lod();
+      framework::AppendLoD(out_lod, lod_offset.first);
     }
 
-    if (height != 0) {
+    if (dst_num_rows != 0) {
       out_tensor.mutable_data(place, x_tensor.type());
       auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
       framework::TensorCopy(x_tensor.Slice(0, height), place, *dev_ctx,

@@ -144,11 +134,8 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
     } else {
       auto& dout_tensor = dout_var->Get<framework::LoDTensor>();
       auto height = dout_tensor.dims()[0];
-      if (height != 0) {
-        auto slice = dx_tensor.Slice(0, static_cast<int>(height));
-        framework::TensorCopy(dout_tensor, dout_tensor.place(), dev_ctx,
-                              &slice);
-      }
+      auto slice = dx_tensor.Slice(0, static_cast<int>(height));
+      framework::TensorCopy(dout_tensor, dout_tensor.place(), dev_ctx, &slice);
       if (dx_tensor.dims()[0] > height) {
         auto rest_tensor = dx_tensor.Slice(
             static_cast<int>(height), static_cast<int>(dx_tensor.dims()[0]));
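Two behavioral shifts here: the forward op loses the dedicated single-level "Shrink LoD" branch, so every shrink goes back through GetSubLoDAndAbsoluteOffset, and its copy guard reverts from height != 0 to dst_num_rows != 0; on the gradient side the TensorCopy is again performed unconditionally rather than being skipped when height == 0.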
paddle/fluid/platform/device_context.cc

@@ -201,7 +201,6 @@ CUDADeviceContext::CUDADeviceContext(CUDAPlace place)
   compute_capability = GetCUDAComputeCapability(place_.device);
   multi_process = GetCUDAMultiProcessors(place_.device);
   max_threads_per_mp = GetCUDAMaxThreadsPerMultiProcessor(place_.device);
-  grid_max_dims_ = GpuMaxGridDim(place_.device);
   PADDLE_ENFORCE(cudaStreamCreate(&stream_));
   eigen_stream_.reset(new EigenCudaStreamDevice());
   eigen_stream_->Reinitialize(&stream_, place);

@@ -240,10 +239,6 @@ int CUDADeviceContext::GetMaxPhysicalThreadCount() const {
   return multi_process * max_threads_per_mp;
 }
 
-std::tuple<int, int, int> CUDADeviceContext::GetMaxGridDims() const {
-  return grid_max_dims_;
-}
-
 Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
   return eigen_device_.get();
 }
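This pairs with the header and gpu_info changes below: grid_max_dims_, GetMaxGridDims(), GpuMaxGridDim(), and the <tuple> includes existed only to feed the large-grid fallback in platform/for_range.h, so the whole chain goes away with the revert.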
paddle/fluid/platform/device_context.h

@@ -13,7 +13,6 @@ limitations under the License. */
 #include <memory>
 #include <mutex>  // NOLINT
 #include <string>
-#include <tuple>
 #include <unordered_map>
 #include <vector>

@@ -92,8 +91,6 @@ class CUDADeviceContext : public DeviceContext {
   /*! \brief Return the max physical thread count in the device context */
   int GetMaxPhysicalThreadCount() const;
 
-  std::tuple<int, int, int> GetMaxGridDims() const;
-
   /*! \brief Return eigen device in the device context. */
   Eigen::GpuDevice* eigen_device() const;

@@ -138,8 +135,6 @@ class CUDADeviceContext : public DeviceContext {
   cudaStream_t stream_;
   cublasHandle_t cublas_handle_;
 
-  std::tuple<int, int, int> grid_max_dims_;
-
   int compute_capability;
   int multi_process;
   int max_threads_per_mp;
paddle/fluid/platform/for_range.h

@@ -48,54 +48,35 @@ __global__ static void ForRangeElemwiseOpGridIsOne(Function func) {
 }
 
 template <typename Function>
-__global__ static void ForRangeElemwiseOp(Function func, size_t limit) {
+__global__ static void ForRangeElemwiseOp(Function func, int limit) {
   size_t idx = static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x);
   if (idx < limit) {
     func(idx);
   }
 }
 
-template <typename Function>
-__global__ static void ForRangeElemwiseOpGridLarge(Function func, size_t limit,
-                                                   int grid_dim) {
-  size_t idx = static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x);
-  while (idx < limit) {
-    func(idx);
-    idx += grid_dim;
-  }
-}
-
 template <>
 struct ForRange<CUDADeviceContext> {
   ForRange(const CUDADeviceContext& dev_ctx, size_t limit)
-      : dev_ctx_(dev_ctx), limit_(limit) {}
+      : dev_ctx_(dev_ctx), limit_(static_cast<int>(limit)) {}
 
   template <typename Function>
   inline void operator()(Function func) const {
     constexpr int num_threads = 1024;
     int block_size = limit_ <= num_threads ? limit_ : num_threads;
-    size_t grid_size = (limit_ + num_threads - 1) / num_threads;
-    int max_grid_dim = std::get<0>(dev_ctx_.GetMaxGridDims());
-
-    if (grid_size < max_grid_dim) {
-      int grid_size_int = static_cast<int>(grid_size);
-      if (grid_size == 1) {
-        ForRangeElemwiseOpGridIsOne<<<1, block_size, 0, dev_ctx_.stream()>>>(
-            func);
-      } else {
-        ForRangeElemwiseOp<<<grid_size_int, block_size, 0,
-                             dev_ctx_.stream()>>>(func, limit_);
-      }
+    int grid_size = (limit_ + num_threads - 1) / num_threads;
+
+    if (grid_size == 1) {
+      ForRangeElemwiseOpGridIsOne<<<1, block_size, 0, dev_ctx_.stream()>>>(
+          func);
     } else {
-      ForRangeElemwiseOpGridLarge<<<max_grid_dim, block_size, 0,
-                                    dev_ctx_.stream()>>>(func, limit_,
-                                                         max_grid_dim);
+      ForRangeElemwiseOp<<<grid_size, block_size, 0, dev_ctx_.stream()>>>(
+          func, limit_);
     }
   }
 
   const CUDADeviceContext& dev_ctx_;
-  size_t limit_;
+  int limit_;
 };
 
 #endif
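A worked example of the restored launch arithmetic: for limit = 2,000,000 with num_threads = 1024, block_size = 1024 and grid_size = (2,000,000 + 1023) / 1024 = 1954 blocks, launched with no check against the device's grid limit; narrowing limit_ back to int also reintroduces truncation for ranges beyond INT_MAX, which the size_t version had avoided. On the other side of the diff, the removed ForRangeElemwiseOpGridLarge, at least as reconstructed here, strides by grid_dim (the block count) rather than the conventional gridDim.x * blockDim.x total-thread stride, so distinct threads can land on the same index; for an idempotent element-wise functor that means redundant work rather than a wrong answer, but it is worth noting when reading the removed code.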
paddle/fluid/platform/gpu_info.cc

@@ -152,22 +152,5 @@ void GpuMemsetAsync(void *dst, int value, size_t count, cudaStream_t stream) {
   PADDLE_ENFORCE(cudaMemsetAsync(dst, value, count, stream),
                  "cudaMemsetAsync failed in paddle::platform::GpuMemsetAsync");
 }
 
-std::tuple<int, int, int> GpuMaxGridDim(int id) {
-  std::tuple<int, int, int> result;
-  PADDLE_ENFORCE(cudaDeviceGetAttribute(&std::get<0>(result),
-                                        cudaDevAttrMaxBlockDimX, id),
-                 "cudaDeviceGetAttribute failed in "
-                 "cudaDevAttrMaxBlockDim");
-  PADDLE_ENFORCE(cudaDeviceGetAttribute(&std::get<1>(result),
-                                        cudaDevAttrMaxBlockDimY, id),
-                 "cudaDeviceGetAttribute failed in "
-                 "cudaDevAttrMaxBlockDim");
-  PADDLE_ENFORCE(cudaDeviceGetAttribute(&std::get<2>(result),
-                                        cudaDevAttrMaxBlockDimZ, id),
-                 "cudaDeviceGetAttribute failed in "
-                 "cudaDevAttrMaxBlockDim");
-  return result;
-}
-
 }  // namespace platform
 }  // namespace paddle
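One detail worth flagging in the removed helper: despite the name GpuMaxGridDim, it queries cudaDevAttrMaxBlockDimX/Y/Z, which report the per-block thread limits (typically 1024 x 1024 x 64), not the grid limits. A minimal standalone sketch, outside Paddle, of querying the actual maximum grid dimensions:

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  // cudaDevAttrMaxGridDimX/Y/Z are the grid-size limits; the removed code
  // asked for cudaDevAttrMaxBlockDimX/Y/Z (threads per block) instead.
  int gx = 0, gy = 0, gz = 0;
  cudaDeviceGetAttribute(&gx, cudaDevAttrMaxGridDimX, /*device=*/0);
  cudaDeviceGetAttribute(&gy, cudaDevAttrMaxGridDimY, /*device=*/0);
  cudaDeviceGetAttribute(&gz, cudaDevAttrMaxGridDimZ, /*device=*/0);
  std::printf("max grid dims: %d x %d x %d\n", gx, gy, gz);
  return 0;
}

Since cudaDevAttrMaxGridDimX is 2^31 - 1 on modern devices, the practical effect of the mix-up was that ForRange capped grids at 1024 blocks, far below what the hardware allows.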
paddle/fluid/platform/gpu_info.h

@@ -19,7 +19,6 @@ limitations under the License. */
 #include <cuda_runtime.h>
 #include <stddef.h>
 #include <string>
-#include <tuple>
 
 namespace paddle {
 namespace platform {

@@ -73,8 +72,6 @@ void GpuMemcpyPeerSync(void *dst, int dst_device, const void *src,
 //! Set memory dst with value count size asynchronously
 void GpuMemsetAsync(void *dst, int value, size_t count, cudaStream_t stream);
 
-std::tuple<int, int, int> GpuMaxGridDim(int id);
-
 }  // namespace platform
 }  // namespace paddle
python/paddle/fluid/layers/io.py

@@ -311,7 +311,6 @@ def _copy_reader_var_(block, var):
     new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
     new_var.desc.set_shapes(var.desc.shapes())
     new_var.desc.set_dtypes(var.desc.dtypes())
-    new_var.desc.set_lod_levels(var.desc.lod_levels())
     new_var.persistable = True
     return new_var

@@ -633,7 +632,6 @@ def py_reader(capacity,
         })
     startup_var.desc.set_dtypes(dtypes)
-    startup_var.desc.set_lod_levels(lod_levels)
     startup_var.persistable = True
     main_prog_var = _copy_reader_var_(default_main_program().current_block(),
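These removals mirror read_op.cc above: reader variables copied into the main program by _copy_reader_var_, and py_reader's startup variable, no longer record lod_levels in their VarDesc.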