Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit a90b8dc1
Authored Jul 28, 2022 by Jiabin Yang · Committed by GitHub on Jul 28, 2022

Support broadcast tensor in phi system (#44590)

Parent: acf07c74
Showing 7 changed files with 66 additions and 14 deletions (+66 / −14).
paddle/phi/api/lib/api_gen_utils.cc                                +3   −3
paddle/phi/api/lib/api_gen_utils.h                                 +1   −1
paddle/phi/api/yaml/generator/api_base.py                          +10  −4
paddle/phi/api/yaml/legacy_api.yaml                                +9   −0
paddle/phi/api/yaml/legacy_backward.yaml                           +12  −0
python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py   +28  −5
python/paddle/tensor/manipulation.py                               +3   −1
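For context, a minimal usage sketch of the Python API this commit wires into the phi system (shapes are illustrative; this snippet is not part of the commit):

# paddle.broadcast_tensors takes a list of tensors and returns the broadcast
# results, one output per input, all with the common broadcast shape.
import paddle

x1 = paddle.rand([1, 4])
x2 = paddle.rand([3, 1])
out1, out2 = paddle.broadcast_tensors([x1, x2])
print(out1.shape, out2.shape)  # both become [3, 4]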
paddle/phi/api/lib/api_gen_utils.cc
@@ -31,14 +31,14 @@ paddle::optional<phi::DenseTensor> TensorToDenseTensor(
   return nullptr;
 }
 
-std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
+std::unique_ptr<std::vector<phi::DenseTensor*>> TensorToDenseTensor(
     const std::vector<Tensor>& tensors) {
-  auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor>>();
+  auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor*>>();
   pt_tensors->reserve(tensors.size());
 
   for (const auto& t : tensors) {
     pt_tensors->push_back(
-        *std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()));
+        std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()).get());
   }
 
   return pt_tensors;
paddle/phi/api/lib/api_gen_utils.h
@@ -35,7 +35,7 @@ std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor);
 paddle::optional<phi::DenseTensor> TensorToDenseTensor(
     const paddle::optional<Tensor>& tensor);
 
-std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
+std::unique_ptr<std::vector<phi::DenseTensor*>> TensorToDenseTensor(
     const std::vector<Tensor>& tensors);
 
 std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor);
paddle/phi/api/yaml/generator/api_base.py
@@ -582,18 +582,18 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                 trans_flag = "{false, true}"
             if input_name in self.optional_vars:
                 input_tensor_code = input_tensor_code + f"""
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""
 
             else:
                 if self.inputs['input_info'][input_name] == "const Tensor&":
                     input_tensor_code = input_tensor_code + f"""
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""
 
                 elif self.inputs['input_info'][input_name] == "const std::vector<Tensor>&":
                     input_tensor_code = input_tensor_code + f"""
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});
 {code_indent}  std::vector<const phi::DenseTensor*> {PREFIX_TENSOR_NAME}{input_name}({PREFIX_TENSOR_NAME}{input_name}_vec->size());
 {code_indent}  for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}.size(); ++i) {{
 {code_indent}    {PREFIX_TENSOR_NAME}{input_name}[i] = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);

@@ -611,6 +611,12 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                 input_tensor_code = input_tensor_code + f"""
 {code_indent}  paddle::optional<phi::TensorBase> {PREFIX_TENSOR_NAME}{input_name} = {input_name} ? paddle::optional<phi::TensorBase>(*{input_name}->impl()) : paddle::none;"""
 
             else:
+                if self.inputs['input_info'][input_name] == "const std::vector<Tensor>&":
+                    input_tensor_code = input_tensor_code + f"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_uq_ptr = TensorToDenseTensor({input_name});
+{code_indent}  const auto& {PREFIX_TENSOR_NAME}{input_name} = *{PREFIX_TENSOR_NAME}{input_name}_uq_ptr;"""
+
+                else:
                     input_tensor_code = input_tensor_code + f"""
 {code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = {input_name}.impl();"""
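The generator change above does two things: the generated kernel.InputAt(...) index now comes from the kernel's parameter list instead of the enumeration counter, and vector<Tensor> inputs are repacked into a std::vector<const phi::DenseTensor*> for the kernel. A hedged, self-contained illustration of the indexing part (the lists and names below are invented for the example, not the generator's real data):

# Hypothetical kernel parameter order and loop order; only .index() matters here.
kernel_param = ["x", "y", "axis"]
for i, input_name in enumerate(["y", "x"]):
    # old template used {i}; new template uses {kernel_param.index(input_name)}
    print(f"{input_name}: kernel.InputAt({kernel_param.index(input_name)}) vs kernel.InputAt({i})")
# y: kernel.InputAt(1) vs kernel.InputAt(0)
# x: kernel.InputAt(0) vs kernel.InputAt(1)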
paddle/phi/api/yaml/legacy_api.yaml
@@ -2513,6 +2513,15 @@
   output : Tensor
   invoke : full_like(x, 0, dtype, place)
 
+- api : broadcast_tensors
+  args : (Tensor[] x)
+  output : Tensor[]{x.size()}
+  infer_meta :
+    func : BroadcastTensorsInferMeta
+  kernel :
+    func : broadcast_tensors
+  backward : broadcast_tensors_grad
+
 # eig
 - api : eig
   args : (Tensor x)
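A hedged reading of the new entry: `output : Tensor[]{x.size()}` declares one output tensor per input, which is observable from Python once the API is generated (shapes below are illustrative, not from the commit):

import paddle

xs = [paddle.rand([1, 3]), paddle.rand([2, 1]), paddle.rand([2, 3])]
outs = paddle.broadcast_tensors(xs)
assert len(outs) == len(xs)                   # Tensor[]{x.size()}: one output per input
assert all(o.shape == [2, 3] for o in outs)   # all broadcast to the common shape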
paddle/phi/api/yaml/legacy_backward.yaml
@@ -280,6 +280,18 @@
     func : brelu_grad
   inplace : (out_grad -> x_grad)
 
+- backward_api : broadcast_tensors_grad
+  forward : broadcast_tensors (Tensor[] x) -> Tensor[](out)
+  args : (Tensor[] x, Tensor[] out_grad)
+  output : Tensor[](x_grad)
+  infer_meta :
+    func : UnchangedMultiInferMeta
+    param : [x]
+  kernel :
+    func : broadcast_tensors_grad
+    param : [out_grad]
+  no_need_buffer : x
+
 - backward_api : cast_grad
   forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
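A hedged sketch of what the backward registration above provides: gradients flow from every broadcast output back to the corresponding input, reduced to the input's original shape (standard broadcast-grad semantics; the snippet is illustrative, not part of the commit):

import paddle

x1 = paddle.rand([1, 3])
x2 = paddle.rand([2, 1])
x1.stop_gradient = False
x2.stop_gradient = False
out1, out2 = paddle.broadcast_tensors([x1, x2])
(out1.sum() + out2.sum()).backward()
print(x1.grad.shape, x2.grad.shape)  # [1, 3] [2, 1]: grads match the input shapes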
python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py
@@ -99,26 +99,49 @@ class TestCPUBroadcastTensorsOp(OpTest):
         ]
         self.set_place()
         self.set_dtypes()
+        self.python_api = paddle.broadcast_tensors
 
-    def run_test(self, test_func, args):
+    def run_dual_test(self, test_func, args):
         for dtype in self.dtypes:
             for gen_func in self.test_gen_func_list:
                 self.inputs, self.outputs = gen_func(dtype)
+                if len(self.outputs["Out"]) < 3:
+                    self.python_out_sig = [
+                        f"out{i}" for i in range(len(self.outputs["Out"]))
+                    ]
                     test_func(**args)
 
+    def run_triple_in_test(self, test_func, args):
+        for dtype in self.dtypes:
+            self.inputs, self.outputs = self.test_gen_func_list[2](dtype)
+            self.python_out_sig = [
+                f"out{i}" for i in range(len(self.outputs["Out"]))
+            ]
+            test_func(**args)
+
     def test_check_output(self):
-        self.run_test(self.check_output_with_place, {
+        self.run_dual_test(
+            self.check_output_with_place, {
                 "place": self.place,
-                "atol": 1e-1
+                "atol": 1e-1,
+                "check_eager": True
             })
 
     def test_check_grad_normal(self):
-        self.run_test(self.check_grad_with_place, {
+        self.run_dual_test(
+            self.check_grad_with_place, {
                 "place": self.place,
                 "inputs_to_check": ['x0', 'x1'],
                 "output_names": ['out0', 'out1'],
                 "max_relative_error": 0.05,
+                "check_eager": True
             })
+        self.run_triple_in_test(
+            self.check_grad_with_place, {
+                "place": self.place,
+                "inputs_to_check": ['x0', 'x1', 'x2'],
+                "output_names": ['out0', 'out1', "out2"],
+                "max_relative_error": 0.05,
+                "check_eager": True
+            })
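The tests now set `python_api` and `python_out_sig` so OpTest can also exercise the eager path (`check_eager=True`). A small hedged sketch of how the output signature is derived from the op outputs (dummy data, not the real test fixtures):

# Two outputs -> ["out0", "out1"]; three outputs -> ["out0", "out1", "out2"].
outputs = {"Out": [("out0", None), ("out1", None)]}
python_out_sig = [f"out{i}" for i in range(len(outputs["Out"]))]
print(python_out_sig)  # ['out0', 'out1']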
python/paddle/tensor/manipulation.py
@@ -1132,7 +1132,9 @@ def broadcast_tensors(input, name=None):
     """
 
     num_inputs = len(input)
-    if paddle.in_dynamic_mode():
+    if paddle.framework.in_dygraph_mode():
+        return _C_ops.final_state_broadcast_tensors(input)
+    if paddle.framework._non_static_mode():
         return _C_ops.broadcast_tensors(input, num_inputs)
 
     check_type(input, 'input', (list, tuple), 'broadcast_tensors')
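Taken together, the Python entry point now dispatches in this order: the new eager mode calls the generated phi API, the legacy dynamic mode keeps the old operator, and everything else falls through to the static-graph layer. A hedged, simplified restatement of that flow (names taken from the diff; not the full function):

def broadcast_tensors_dispatch(input):
    # error checks and the static-graph branch are omitted in this sketch
    num_inputs = len(input)
    if paddle.framework.in_dygraph_mode():
        return _C_ops.final_state_broadcast_tensors(input)   # new phi/eager path
    if paddle.framework._non_static_mode():
        return _C_ops.broadcast_tensors(input, num_inputs)   # legacy dygraph path
    ...  # static-graph branch: check_type(...) and the usual op construction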