BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 266955a9 (unverified)

Authored on Feb 09, 2022 by Chen Weihang; committed via GitHub on Feb 09, 2022.

move stream into pten (#39392)

Parent: b12e7a17

Showing 11 changed files with 31 additions and 33 deletions (+31 -33).
paddle/fluid/framework/tensor.h                       +1  -1
paddle/fluid/memory/allocation/allocator_facade.cc    +2  -2
paddle/fluid/memory/allocation/allocator_facade.h     +4  -4
paddle/fluid/memory/malloc.cc                         +3  -3
paddle/fluid/memory/malloc.h                          +3  -3
paddle/fluid/memory/stream_safe_cuda_alloc_test.cu    +11 -11
paddle/fluid/operators/memcpy_h2d_op.h                +2  -2
paddle/pten/core/dense_tensor.h                       +1  -1
paddle/pten/core/dense_tensor.inl                     +1  -1
paddle/pten/core/dense_tensor_impl.cc                 +1  -1
paddle/fluid/platform/stream/stream.h → paddle/pten/core/stream.h    +2  -4
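Every hunk in this commit is the same mechanical substitution: the stream wrapper moves from `paddle::platform::Stream` (declared in `paddle/fluid/platform/stream/stream.h`) to `pten::Stream` (now in `paddle/pten/core/stream.h`), and includes plus call sites are updated to match. A minimal caller-side sketch of the migration, assuming a CUDA build; the helper name `AllocOnStream` and the `raw_stream` argument are illustrative, not part of this commit:

```cpp
// Sketch only: shows how a caller wraps a raw CUDA stream after this commit.
// Assumes a CUDA build; AllocOnStream/raw_stream are made-up illustration names.
#include <cuda_runtime.h>

#include "paddle/fluid/memory/malloc.h"
#include "paddle/pten/core/stream.h"  // previously paddle/fluid/platform/stream/stream.h

void AllocOnStream(const paddle::platform::Place& place, size_t size,
                   cudaStream_t raw_stream) {
  // Before: platform::Stream(reinterpret_cast<platform::StreamId>(raw_stream))
  // After:  the same uint64_t-backed wrapper, now in the pten namespace.
  auto allocation = paddle::memory::AllocShared(
      place, size,
      pten::Stream(reinterpret_cast<pten::StreamId>(raw_stream)));
  (void)allocation;  // the buffer is tracked against raw_stream as before
}
```

The per-file diffs below apply exactly this rename to the allocator facade, the `memory::AllocShared`/`InSameStream` API, the stream-safe allocator test, the memcpy H2D operator, and `DenseTensor::mutable_data`, and finally move the header itself.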
paddle/fluid/framework/tensor.h

@@ -28,7 +28,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/fluid/platform/stream/stream.h"
+#include "paddle/pten/core/stream.h"
 #include "paddle/pten/core/dense_tensor.h"
paddle/fluid/memory/allocation/allocator_facade.cc

@@ -940,7 +940,7 @@ uint64_t AllocatorFacade::Release(const platform::Place& place) {
 }
 
 std::shared_ptr<pten::Allocation> AllocatorFacade::AllocShared(
-    const platform::Place& place, size_t size, const platform::Stream& stream) {
+    const platform::Place& place, size_t size, const pten::Stream& stream) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   PADDLE_ENFORCE_EQ(
       FLAGS_use_stream_safe_cuda_allocator, true,
@@ -965,7 +965,7 @@ std::shared_ptr<pten::Allocation> AllocatorFacade::AllocShared(
 
 bool AllocatorFacade::InSameStream(
     const std::shared_ptr<pten::Allocation>& allocation,
-    const platform::Stream& stream) {
+    const pten::Stream& stream) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   PADDLE_ENFORCE_EQ(
       FLAGS_use_stream_safe_cuda_allocator, true,
paddle/fluid/memory/allocation/allocator_facade.h

@@ -22,7 +22,7 @@
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 #endif
 #include "paddle/fluid/platform/place.h"
-#include "paddle/fluid/platform/stream/stream.h"
+#include "paddle/pten/core/stream.h"
 
 namespace paddle {
 namespace memory {
@@ -71,13 +71,13 @@ class AllocatorFacade {
 
   std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                           size_t size,
-                                          const platform::Stream& stream);
+                                          const pten::Stream& stream);
 
   bool InSameStream(const std::shared_ptr<Allocation>& allocation,
-                    const platform::Stream& stream);
+                    const pten::Stream& stream);
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  // TODO(zhiqiu): change gpuStream_t to platform::Stream if needed.
+  // TODO(zhiqiu): change gpuStream_t to pten::Stream if needed.
   AllocationPtr Alloc(const platform::Place& place, size_t size,
                       const gpuStream_t& stream);
   uint64_t Release(const platform::CUDAPlace& place, const gpuStream_t& stream);
paddle/fluid/memory/malloc.cc

@@ -16,7 +16,7 @@ limitations under the License. */
 
 #include "paddle/fluid/memory/allocation/allocator_facade.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/fluid/platform/stream/stream.h"
+#include "paddle/pten/core/stream.h"
 
 namespace paddle {
 namespace memory {
@@ -36,13 +36,13 @@ uint64_t Release(const platform::Place& place) {
 
 std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                         size_t size,
-                                        const platform::Stream& stream) {
+                                        const pten::Stream& stream) {
   return allocation::AllocatorFacade::Instance().AllocShared(place, size,
                                                              stream);
 }
 
 bool InSameStream(const std::shared_ptr<Allocation>& allocation,
-                  const platform::Stream& stream) {
+                  const pten::Stream& stream) {
   return allocation::AllocatorFacade::Instance().InSameStream(allocation,
                                                               stream);
 }
paddle/fluid/memory/malloc.h

@@ -18,8 +18,8 @@ limitations under the License. */
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/fluid/platform/stream/stream.h"
 #include "paddle/pten/core/device_context.h"
+#include "paddle/pten/core/stream.h"
 
 namespace paddle {
 namespace memory {
@@ -39,10 +39,10 @@ extern uint64_t Release(const platform::Place& place);
 
 extern std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
                                                size_t size,
-                                               const platform::Stream& stream);
+                                               const pten::Stream& stream);
 
 extern bool InSameStream(const std::shared_ptr<Allocation>& allocation,
-                         const platform::Stream& stream);
+                         const pten::Stream& stream);
 
 extern void* GetBasePtr(const std::shared_ptr<Allocation>& allocation);
paddle/fluid/memory/stream_safe_cuda_alloc_test.cu

@@ -30,7 +30,7 @@
 #include "paddle/fluid/platform/cuda_graph_with_memory_pool.h"
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/stream/stream.h"
+#include "paddle/pten/core/stream.h"
 
 namespace paddle {
 namespace memory {
@@ -70,9 +70,9 @@ class StreamSafeCUDAAllocTest : public ::testing::Test {
       PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
 #endif
 
-      std::shared_ptr<Allocation> allocation = AllocShared(
-          place_, workspace_size_,
-          platform::Stream(reinterpret_cast<platform::StreamId>(stream)));
+      std::shared_ptr<Allocation> allocation = AllocShared(
+          place_, workspace_size_,
+          pten::Stream(reinterpret_cast<pten::StreamId>(stream)));
 #ifdef PADDLE_WITH_CUDA
       PADDLE_ENFORCE_GPU_SUCCESS(
           cudaMemset(allocation->ptr(), 0, allocation->size()));
@@ -286,9 +286,9 @@ TEST(StreamSafeCUDAAllocInterfaceTest, GetStreamInterfaceTest) {
   PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&new_stream));
 #endif
 
-  std::shared_ptr<Allocation> allocation_new_stream = AllocShared(
-      place, alloc_size,
-      platform::Stream(reinterpret_cast<platform::StreamId>(new_stream)));
+  std::shared_ptr<Allocation> allocation_new_stream = AllocShared(
+      place, alloc_size,
+      pten::Stream(reinterpret_cast<pten::StreamId>(new_stream)));
   EXPECT_EQ(GetStream(allocation_new_stream), new_stream);
 
 #ifdef PADDLE_WITH_CUDA
@@ -315,10 +315,10 @@ TEST(StreamSafeCUDAAllocInterfaceTest, CUDAGraphExceptionTest) {
   EXPECT_THROW(Release(place), paddle::platform::EnforceNotMet);
   EXPECT_THROW(allocation::AllocatorFacade::Instance().GetAllocator(place),
               paddle::platform::EnforceNotMet);
-  EXPECT_THROW(AllocShared(place, alloc_size,
-                           platform::Stream(
-                               reinterpret_cast<platform::StreamId>(nullptr))),
-               paddle::platform::EnforceNotMet);
+  EXPECT_THROW(AllocShared(place, alloc_size,
+                           pten::Stream(
+                               reinterpret_cast<pten::StreamId>(nullptr))),
+               paddle::platform::EnforceNotMet);
   EXPECT_THROW(Alloc(place, alloc_size, nullptr),
                paddle::platform::EnforceNotMet);
   EXPECT_THROW(Release(place, nullptr), paddle::platform::EnforceNotMet);
paddle/fluid/operators/memcpy_h2d_op.h

@@ -15,7 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/stream/stream.h"
+#include "paddle/pten/core/stream.h"
 
 namespace pten {
 class DenseTensor;
@@ -47,7 +47,7 @@ class MemcpyH2DFunctor {
 #endif
     out_tensor.mutable_data(
         dev_ctx_.GetPlace(), lod_tensor.type(),
-        platform::Stream(reinterpret_cast<platform::StreamId>(stream)));
+        pten::Stream(reinterpret_cast<pten::StreamId>(stream)));
 
     if (dst_place_type_ == 0 || dst_place_type_ == 1) {
       framework::TensorCopy(lod_tensor, dev_ctx_.GetPlace(), dev_ctx_,
paddle/pten/core/dense_tensor.h

@@ -16,12 +16,12 @@ limitations under the License. */
 #include "paddle/pten/core/allocator.h"
 #include "paddle/pten/core/storage.h"
+#include "paddle/pten/core/stream.h"
 #include "paddle/pten/core/tensor_base.h"
 #include "paddle/pten/core/tensor_meta.h"
 
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/fluid/framework/data_type.h"
-#include "paddle/fluid/platform/stream/stream.h"
 
 /* @jim19930609: Move to MKLDNN_Tensor in the future
  */
paddle/pten/core/dense_tensor.inl

@@ -76,7 +76,7 @@ void* mutable_data(const paddle::platform::Place& place,
 
 void* mutable_data(const paddle::platform::Place& place,
                    paddle::framework::proto::VarType::Type type,
-                   const paddle::platform::Stream& stream);
+                   const pten::Stream& stream);
 
 /* @jim19930609: Remove dependency on protobuf after Tensor Unification.
  */
paddle/pten/core/dense_tensor_impl.cc

@@ -144,7 +144,7 @@ void* DenseTensor::mutable_data(const paddle::platform::Place& place,
 
 void* DenseTensor::mutable_data(const paddle::platform::Place& place,
                                 paddle::framework::proto::VarType::Type type,
-                                const paddle::platform::Stream& stream) {
+                                const pten::Stream& stream) {
   set_type(type);
   PADDLE_ENFORCE_GE(
       numel(),
paddle/fluid/platform/stream/stream.h → paddle/pten/core/stream.h

@@ -17,8 +17,7 @@ limitations under the License. */
 #include <cstdint>
 #include <memory>
 
-namespace paddle {
-namespace platform {
+namespace pten {
 
 using StreamId = uint64_t;
 class Stream final {
@@ -30,5 +29,4 @@ class Stream final {
   StreamId id_;
 };
 
-}  // namespace platform
-}  // namespace paddle
+}  // namespace pten
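For reference, the moved header is a thin identifier wrapper: the visible hunks show only `using StreamId = uint64_t;`, `class Stream final {`, and a single `StreamId id_;` member, while the call sites above construct it as `pten::Stream(reinterpret_cast<pten::StreamId>(stream))`. A sketch of the header after the move; the constructor is implied by those call sites, but the `id()` accessor is an assumption that does not appear in this diff:

```cpp
// paddle/pten/core/stream.h after the move -- reconstructed sketch.
// StreamId, `class Stream final`, and the id_ member are visible in the diff;
// the constructor is implied by the call sites, and id() is assumed.
#include <cstdint>
#include <memory>

namespace pten {

using StreamId = uint64_t;  // a raw gpuStream_t cast to an integer id

class Stream final {
 public:
  explicit Stream(StreamId id) : id_(id) {}
  StreamId id() const { return id_; }  // assumed accessor

 private:
  StreamId id_;
};

}  // namespace pten
```

Because the header pulls in only `<cstdint>` and `<memory>`, keeping the wrapper as a plain integer id leaves pten's public headers free of CUDA/HIP runtime types.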