BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 870402fd (unverified)
move meshgrid yaml (#41411)

Authored on Apr 05, 2022 by YuanRisheng; committed via GitHub on Apr 05, 2022.
Parent: b72a7ebb
Showing 8 changed files with 227 additions and 1 deletion (+227 −1).
paddle/phi/api/lib/api_custom_impl.cc                      +148  −0
paddle/phi/api/lib/api_custom_impl.h                         +3  −0
paddle/phi/infermeta/backward.cc                            +14  −0
paddle/phi/infermeta/backward.h                              +4  −0
python/paddle/fluid/tests/unittests/test_meshgrid_op.py     +43  −0
python/paddle/tensor/creation.py                             +3  −1
python/paddle/utils/code_gen/api.yaml                        +6  −0
python/paddle/utils/code_gen/backward.yaml                   +6  −0
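For context, a minimal usage sketch of the API whose forward and backward definitions this commit moves into the code-gen yaml files (illustrative only; the shapes mirror the unit tests in this diff):

import numpy as np
import paddle

# paddle.meshgrid expands N 1-D tensors into N N-D coordinate tensors.
x = paddle.to_tensor(np.random.randint(0, 100, [100]).astype('int32'))
y = paddle.to_tensor(np.random.randint(0, 100, [200]).astype('int32'))
grid_x, grid_y = paddle.meshgrid(x, y)
print(grid_x.shape, grid_y.shape)  # [100, 200] [100, 200]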
paddle/phi/api/lib/api_custom_impl.cc
@@ -410,5 +410,153 @@ std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
  return x_grad;
}

std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs) {
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;

  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(inputs);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "meshgrid", {kernel_backend, kernel_layout, kernel_data_type});
  VLOG(6) << "meshgrid API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "meshgrid API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto input_inputs_vec = PrepareData(inputs, kernel.InputAt(0), {});
  std::vector<const phi::DenseTensor*> input_inputs(input_inputs_vec->size());
  for (size_t i = 0; i < input_inputs.size(); ++i) {
    input_inputs[i] = &input_inputs_vec->at(i);
  }

  auto x_meta_vec = MakeMetaTensor(input_inputs);
  std::vector<phi::MetaTensor*> inputs_metas(x_meta_vec.size());
  for (size_t i = 0; i < x_meta_vec.size(); ++i) {
    inputs_metas[i] = &x_meta_vec[i];
  }

  // Calculate the number of out tensors
  size_t out_number = inputs.size();

  std::vector<Tensor> out;
  auto dense_outs = SetKernelOutput(out_number, kernel_backend, &out);

  std::vector<phi::MetaTensor> meta_outs;
  meta_outs.reserve(out_number);
  std::vector<phi::MetaTensor*> meta_out_ptrs;
  meta_out_ptrs.reserve(out_number);
  for (size_t i = 0; i < out_number; ++i) {
    meta_outs.push_back(dense_outs[i]);
    meta_out_ptrs.push_back(&meta_outs.back());
  }

  phi::MeshgridInferMeta(inputs_metas, meta_out_ptrs);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const std::vector<const phi::DenseTensor*>&,
                                    std::vector<phi::DenseTensor*>&);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx, input_inputs, dense_outs);

  return out;
}
std::vector<Tensor> meshgrid_grad_impl(
    const std::vector<Tensor>& inputs,
    const std::vector<Tensor>& outputs_grad) {
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;

  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(inputs, outputs_grad);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "meshgrid_grad", {kernel_backend, kernel_layout, kernel_data_type});
  VLOG(6) << "meshgrid_grad API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "meshgrid_grad API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto input_inputs_vec = PrepareData(inputs, kernel.InputAt(0), {});
  std::vector<const phi::DenseTensor*> input_inputs(input_inputs_vec->size());
  for (size_t i = 0; i < input_inputs.size(); ++i) {
    input_inputs[i] = &input_inputs_vec->at(i);
  }
  auto input_outputs_grad_vec = PrepareData(outputs_grad, kernel.InputAt(1), {});
  std::vector<const phi::DenseTensor*> input_outputs_grad(
      input_outputs_grad_vec->size());
  for (size_t i = 0; i < input_outputs_grad.size(); ++i) {
    input_outputs_grad[i] = &input_outputs_grad_vec->at(i);
  }

  size_t out_number = inputs.size();
  std::vector<Tensor> api_output;
  auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);

  auto inputs_meta_vec = MakeMetaTensor(input_inputs);
  std::vector<phi::MetaTensor*> inputs_metas(inputs_meta_vec.size());
  for (size_t i = 0; i < inputs_meta_vec.size(); ++i) {
    inputs_metas[i] = &inputs_meta_vec[i];
  }

  auto outputs_grad_meta_vec = MakeMetaTensor(input_outputs_grad);
  std::vector<phi::MetaTensor*> outputs_grad_metas(
      outputs_grad_meta_vec.size());
  for (size_t i = 0; i < outputs_grad_meta_vec.size(); ++i) {
    outputs_grad_metas[i] = &outputs_grad_meta_vec[i];
  }

  std::vector<phi::MetaTensor> meta_outs;
  meta_outs.reserve(out_number);
  std::vector<phi::MetaTensor*> meta_out_ptrs;
  meta_out_ptrs.reserve(out_number);
  for (size_t i = 0; i < out_number; ++i) {
    meta_outs.push_back(kernel_out[i]);
    meta_out_ptrs.push_back(&meta_outs.back());
  }

  phi::MeshgridGradInferMeta(inputs_metas, outputs_grad_metas, meta_out_ptrs);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const std::vector<const phi::DenseTensor*>&,
                                    const std::vector<const phi::DenseTensor*>&,
                                    std::vector<phi::DenseTensor*>&);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx, input_inputs, input_outputs_grad, kernel_out);

  return api_output;
}

}  // namespace experimental
}  // namespace paddle
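Both impls above hand-write the usual phi dispatch recipe: resolve a kernel key from the inputs, PrepareData, run the InferMeta function to shape the outputs, then call the variadic kernel. The tensor semantics the forward ultimately dispatches to can be summarized with a small NumPy reference (my own sketch, not Paddle code): each 1-D input of length d_i is broadcast along axis i into outputs of shape (d_0, ..., d_{n-1}), one output per input.

import numpy as np

def meshgrid_ref(inputs):
    # One output per input; every output has the concatenated shape.
    shape = tuple(len(v) for v in inputs)
    outs = []
    for i, v in enumerate(inputs):
        view = [1] * len(inputs)
        view[i] = shape[i]
        outs.append(np.broadcast_to(np.reshape(v, view), shape).copy())
    return outs

gx, gy = meshgrid_ref([np.arange(100), np.arange(200)])
assert gx.shape == gy.shape == (100, 200)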
paddle/phi/api/lib/api_custom_impl.h
@@ -59,6 +59,9 @@ std::vector<Tensor> concat_grad_impl(const std::vector<Tensor>& x,
std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
                                    const Tensor& out_grad,
                                    int axis);

std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);

std::vector<Tensor> meshgrid_grad_impl(const std::vector<Tensor>& inputs,
                                       const std::vector<Tensor>& outputs_grad);

}  // namespace experimental
}  // namespace paddle
paddle/phi/infermeta/backward.cc
@@ -245,6 +245,20 @@ void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
  dx->share_meta(x);
}

void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
                           const std::vector<MetaTensor*>& outputs_grad,
                           std::vector<MetaTensor*> inputs_grad) {
  PADDLE_ENFORCE_GT(outputs_grad.size(),
                    1,
                    errors::InvalidArgument(
                        "Number of Inputs(Out@Grad) should be larger than 1. "
                        "But received Inputs(Out@Grad)' size = %d.",
                        outputs_grad.size()));
  for (size_t i = 0; i < inputs.size(); i++) {
    inputs_grad[i]->share_meta(*inputs[i]);
  }
}

void NllLossGradInferMeta(const MetaTensor& x,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
                          ...
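MeshgridGradInferMeta only pins down metadata: each inputs_grad[i] shares the meta (shape, dtype) of inputs[i]. That contract matches the gradient rule for a broadcast: output i is input i broadcast along axis i, so the gradient w.r.t. input i reduce-sums outputs_grad[i] over every other axis. A NumPy sketch of that rule (mine, not the phi kernel):

import numpy as np

def meshgrid_grad_ref(inputs, outputs_grad):
    grads = []
    for i, g in enumerate(outputs_grad):
        other_axes = tuple(ax for ax in range(g.ndim) if ax != i)
        grads.append(g.sum(axis=other_axes))
    for x, dx in zip(inputs, grads):
        assert dx.shape == np.shape(x)  # the contract share_meta encodes
    return grads

dx, dy = meshgrid_grad_ref([np.arange(3.0), np.arange(4.0)],
                           [np.ones((3, 4)), np.ones((3, 4))])
assert dx.shape == (3,) and dy.shape == (4,)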
paddle/phi/infermeta/backward.h
@@ -115,6 +115,10 @@ void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
                                 bool adaptive,
                                 MetaTensor* dx);

void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
                           const std::vector<MetaTensor*>& outputs_grad,
                           std::vector<MetaTensor*> inputs_grad);

void NllLossGradInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
                          ...
python/paddle/fluid/tests/unittests/test_meshgrid_op.py
@@ -20,6 +20,7 @@ from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
import paddle
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard


class TestMeshgridOp(OpTest):
@@ -149,6 +150,10 @@ class TestMeshgridOp6(unittest.TestCase):
        assert np.array_equal(res_3.shape, [100, 200])
        assert np.array_equal(res_4.shape, [100, 200])

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_with_dygraph()


class TestMeshgridOp7(unittest.TestCase):
    def test_api_with_dygraph_list_input(self):
@@ -163,6 +168,10 @@ class TestMeshgridOp7(unittest.TestCase):
        assert np.array_equal(res_3.shape, [100, 200])
        assert np.array_equal(res_4.shape, [100, 200])

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_with_dygraph_list_input()


class TestMeshgridOp8(unittest.TestCase):
    def test_api_with_dygraph_tuple_input(self):
@@ -177,6 +186,40 @@ class TestMeshgridOp8(unittest.TestCase):
        assert np.array_equal(res_3.shape, [100, 200])
        assert np.array_equal(res_4.shape, [100, 200])

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_with_dygraph_tuple_input()


class TestMeshgridEager(unittest.TestCase):
    def test_dygraph_final_state_api(self):
        input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
        input_2 = np.random.randint(0, 100, [200, ]).astype('int32')
        with fluid.dygraph.guard():
            tensor_1 = fluid.dygraph.to_variable(input_1)
            tensor_2 = fluid.dygraph.to_variable(input_2)
            tensor_1.stop_gradient = False
            tensor_2.stop_gradient = False
            res_1, res_2 = paddle.tensor.meshgrid((tensor_1, tensor_2))
            sum = paddle.add_n([res_1, res_2])
            sum.backward()
            with _test_eager_guard():
                tensor_eager_1 = fluid.dygraph.to_variable(input_1)
                tensor_eager_2 = fluid.dygraph.to_variable(input_2)
                tensor_eager_1.stop_gradient = False
                tensor_eager_2.stop_gradient = False
                res_eager_1, res_eager_2 = paddle.tensor.meshgrid(
                    (tensor_eager_1, tensor_eager_2))
                sum_eager = paddle.add_n([res_eager_1, res_eager_2])
                sum_eager.backward()
                self.assertEqual(
                    (tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(),
                    True)
                self.assertEqual(
                    (tensor_2.grad.numpy() == tensor_eager_2.grad.numpy()).all(),
                    True)


if __name__ == '__main__':
    paddle.enable_static()
    ...
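The new TestMeshgridEager case asserts that legacy-dygraph and eager-mode gradients agree elementwise. With the test's shapes, and assuming backward on the non-scalar sum is seeded with ones (as dygraph does), the expected values are easy to check by hand; a quick NumPy confirmation (illustrative, not part of the test):

import numpy as np

# Each element of the [100] input appears in 200 output positions, and
# each element of the [200] input in 100, so those are the gradients
# both execution modes must produce.
g = np.ones((100, 200))
assert (g.sum(axis=1) == 200).all()  # grad w.r.t. the length-100 input
assert (g.sum(axis=0) == 100).all()  # grad w.r.t. the length-200 input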
python/paddle/tensor/creation.py
@@ -776,10 +776,12 @@ def meshgrid(*args, **kwargs):
     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if paddle.in_dynamic_mode():
+    if _in_legacy_dygraph():
         num = len(args)
         out = _C_ops.meshgrid(list(args), num)
         return out
+    if in_dygraph_mode():
+        return _C_ops.final_state_meshgrid(list(args))

     name = kwargs.get("name", None)
     helper = LayerHelper('meshgrid', **locals())
     ...
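After this change, meshgrid() keeps three routes: legacy dygraph goes through _C_ops.meshgrid, eager mode through the newly generated _C_ops.final_state_meshgrid, and static graph falls through to LayerHelper. The list/tuple-unpacking branch at the top makes the following two calls equivalent (a minimal sketch):

import paddle

x, y = paddle.rand([3]), paddle.rand([4])
a0, a1 = paddle.meshgrid(x, y)    # varargs
b0, b1 = paddle.meshgrid([x, y])  # single list, unpacked by the API
assert a0.shape == b0.shape == [3, 4]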
python/paddle/utils/code_gen/api.yaml
@@ -1120,6 +1120,12 @@
    func : mean
  backward : mean_grad

- api : meshgrid
  args : (Tensor[] inputs)
  output : Tensor[]
  invoke : meshgrid_impl(inputs)
  backward : meshgrid_grad

- api : min
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  ...
python/paddle/utils/code_gen/backward.yaml
@@ -777,6 +777,12 @@
  kernel :
    func : mean_grad

- backward_api : meshgrid_grad
  forward : meshgrid (Tensor[] inputs) -> Tensor[](outputs)
  args : (Tensor[] inputs, Tensor[] outputs_grad)
  output : Tensor[](inputs_grad)
  invoke : meshgrid_grad_impl(inputs, outputs_grad)

- backward_api : min_grad
  forward : min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  ...
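Because both yaml entries use invoke rather than kernel, the code generator routes the generated eager API to the hand-written meshgrid_impl / meshgrid_grad_impl shown earlier instead of emitting its own dispatch. An end-to-end sketch of what this wiring enables (assuming a Paddle build at this commit, run in dynamic mode):

import paddle

x = paddle.rand([3])
y = paddle.rand([4])
x.stop_gradient = False
y.stop_gradient = False

gx, gy = paddle.meshgrid(x, y)
(gx + gy).sum().backward()  # backward resolves to meshgrid_grad

assert x.grad.shape == [3] and y.grad.shape == [4]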