Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
dabb8f23
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
dabb8f23
编写于
9月 19, 2022
作者:
C
Chen Weihang
提交者:
GitHub
9月 19, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Revert "Simplify size op impl (#45808)" (#46168)
This reverts commit
c252b1de
.
上级
7a6db0a3
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
78 additions
and
23 deletions
+78
-23
paddle/phi/kernels/cpu/size_kernel.cc
paddle/phi/kernels/cpu/size_kernel.cc
+32
-0
paddle/phi/kernels/gpu/size_kernel.cu
paddle/phi/kernels/gpu/size_kernel.cu
+31
-0
paddle/phi/kernels/impl/size_kernel_impl.h
paddle/phi/kernels/impl/size_kernel_impl.h
+14
-19
paddle/phi/kernels/size_kernel.h
paddle/phi/kernels/size_kernel.h
+1
-1
python/paddle/distributed/collective.py
python/paddle/distributed/collective.py
+0
-3
未找到文件。
paddle/phi/kernels/cpu/size_kernel.cc
0 → 100644
浏览文件 @
dabb8f23
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/size_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/size_kernel_impl.h"

// Registers the `size` kernel for the CPU backend, for all layouts and the
// listed input dtypes. The implementation (SizeKernel in
// impl/size_kernel_impl.h) writes input.numel() into the output as int64_t,
// so the output dtype is int64 regardless of the input dtype listed here.
PD_REGISTER_KERNEL(size,
                   CPU,
                   ALL_LAYOUT,
                   phi::SizeKernel,
                   uint8_t,
                   int16_t,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   float,
                   double,
                   bool) {}
paddle/phi/kernels/gpu/size_kernel.cu
0 → 100644
浏览文件 @
dabb8f23
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/size_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/size_kernel_impl.h"

// Registers the `size` kernel for the GPU backend, for all layouts and the
// listed input dtypes. The shared implementation (SizeKernel in
// impl/size_kernel_impl.h) writes input.numel() into the output as int64_t.
// NOTE(review): unlike the CPU registration in cpu/size_kernel.cc, this list
// omits uint8_t — presumably intentional, but worth confirming upstream.
PD_REGISTER_KERNEL(size,
                   GPU,
                   ALL_LAYOUT,
                   phi::SizeKernel,
                   int16_t,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   float,
                   double,
                   bool) {}
paddle/phi/kernels/
size_kernel.cc
→
paddle/phi/kernels/
impl/size_kernel_impl.h
浏览文件 @
dabb8f23
...
...
@@ -12,33 +12,28 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "paddle/phi/core/tensor_utils.h"

namespace phi {

// `size` kernel implementation: writes the total element count of `input`
// (input.numel()) into `out` as a single int64_t value.
//
// T is the input element type (used only for dtype-based registration in
// cpu/size_kernel.cc and gpu/size_kernel.cu); the computation itself does not
// read the input's data, only its metadata.
//
// Note: registration is done in the per-backend .cc/.cu files via
// PD_REGISTER_KERNEL; this header deliberately contains no
// PD_REGISTER_GENERAL_KERNEL calls, which would double-register `size`.
template <typename T, typename Context>
void SizeKernel(const Context& ctx,
                const DenseTensor& input,
                DenseTensor* out) {
  auto place = ctx.GetPlace();
  auto out_data = ctx.template Alloc<int64_t>(out);
  auto cpu_place = phi::CPUPlace();
  if (place == cpu_place) {
    // Output is host-resident: write the count directly.
    out_data[0] = input.numel();
  } else {
    // Output lives on a device: stage the scalar in a host-side tensor,
    // then copy it to `out` on the device. The fourth argument of
    // phi::Copy is the blocking flag (false here — presumably an async
    // copy on the context's stream; confirm against phi::Copy's docs).
    DenseTensor cpu_tensor;
    cpu_tensor.Resize(out->dims());
    auto cpu_data = ctx.template HostAlloc<int64_t>(&cpu_tensor);
    cpu_data[0] = input.numel();
    phi::Copy(ctx, cpu_tensor, place, false, out);
  }
}

}  // namespace phi
paddle/phi/kernels/size_kernel.h
浏览文件 @
dabb8f23
...
...
@@ -18,7 +18,7 @@
namespace phi {

// Declaration of the `size` op kernel: writes the total element count of
// `input` into `out` as int64_t. Defined in impl/size_kernel_impl.h.
// T is the input element type, present so the kernel can be registered per
// input dtype (see the PD_REGISTER_KERNEL dtype lists in the backend files).
template <typename T, typename Context>
void SizeKernel(const Context& ctx,
                const DenseTensor& input,
                DenseTensor* out);

}  // namespace phi
python/paddle/distributed/collective.py
浏览文件 @
dabb8f23
...
...
@@ -1140,9 +1140,6 @@ def all_gather_object(object_list, obj, group=None):
),
"all_gather_object doesn't support static graph mode."
tensor
,
len_of_tensor
=
_convert_object_to_tensor
(
obj
)
if
paddle
.
get_device
()
!=
"cpu"
:
len_of_tensor
=
len_of_tensor
.
_copy_to
(
paddle
.
framework
.
_current_expected_place
(),
False
)
# gather len_of_tensor from all ranks
list_len_of_tensor
=
[]
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录