Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle
提交
10225d22
P
Paddle
项目概览
PaddlePaddle
/
Paddle
大约 2 年 前同步成功
通知
2325
Star
20933
Fork
5424
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1423
列表
看板
标记
里程碑
合并请求
543
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1,423
Issue
1,423
列表
看板
标记
里程碑
合并请求
543
合并请求
543
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
10225d22
编写于
10月 17, 2022
作者:
Z
zhangkaihuo
提交者:
GitHub
10月 17, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[cherry-pick]Sparse static graph (#46838)
cherry-pick : #46322, #46245 Sparse API 支持静态图
上级
976af0da
变更
45
隐藏空白更改
内联
并排
Showing
45 changed file
with
937 addition
and
74 deletion
+937
-74
paddle/fluid/framework/CMakeLists.txt
paddle/fluid/framework/CMakeLists.txt
+3
-2
paddle/fluid/framework/feed_fetch_type.h
paddle/fluid/framework/feed_fetch_type.h
+10
-2
paddle/fluid/framework/framework.proto
paddle/fluid/framework/framework.proto
+3
-0
paddle/fluid/framework/infershape_utils.cc
paddle/fluid/framework/infershape_utils.cc
+45
-1
paddle/fluid/framework/infershape_utils.h
paddle/fluid/framework/infershape_utils.h
+3
-0
paddle/fluid/framework/operator.cc
paddle/fluid/framework/operator.cc
+40
-0
paddle/fluid/framework/operator.h
paddle/fluid/framework/operator.h
+5
-0
paddle/fluid/framework/tensor.h
paddle/fluid/framework/tensor.h
+1
-0
paddle/fluid/framework/var_desc.cc
paddle/fluid/framework/var_desc.cc
+4
-0
paddle/fluid/framework/var_type.h
paddle/fluid/framework/var_type.h
+4
-0
paddle/fluid/framework/var_type_traits.h
paddle/fluid/framework/var_type_traits.h
+3
-0
paddle/fluid/framework/variable_helper.cc
paddle/fluid/framework/variable_helper.cc
+2
-0
paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
...le/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
+4
-0
paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
+2
-0
paddle/fluid/operators/controlflow/feed_op.cc
paddle/fluid/operators/controlflow/feed_op.cc
+17
-0
paddle/fluid/operators/controlflow/fetch_op.cc
paddle/fluid/operators/controlflow/fetch_op.cc
+3
-0
paddle/fluid/operators/controlflow/fetch_v2_op.cc
paddle/fluid/operators/controlflow/fetch_v2_op.cc
+12
-0
paddle/fluid/operators/sparse_manual_op.cc
paddle/fluid/operators/sparse_manual_op.cc
+226
-0
paddle/fluid/pybind/protobuf.cc
paddle/fluid/pybind/protobuf.cc
+2
-1
paddle/fluid/pybind/pybind.cc
paddle/fluid/pybind/pybind.cc
+3
-0
paddle/fluid/pybind/tensor.cc
paddle/fluid/pybind/tensor.cc
+14
-0
paddle/phi/api/lib/api_gen_utils.cc
paddle/phi/api/lib/api_gen_utils.cc
+3
-0
paddle/phi/api/yaml/sparse_backward.yaml
paddle/phi/api/yaml/sparse_backward.yaml
+8
-7
paddle/phi/api/yaml/sparse_ops.yaml
paddle/phi/api/yaml/sparse_ops.yaml
+3
-2
paddle/phi/core/compat/arg_map_context.h
paddle/phi/core/compat/arg_map_context.h
+1
-0
paddle/phi/core/meta_tensor.cc
paddle/phi/core/meta_tensor.cc
+3
-0
paddle/phi/core/meta_tensor.h
paddle/phi/core/meta_tensor.h
+6
-0
paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
+11
-0
paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
+27
-0
paddle/phi/kernels/sparse/elementwise_grad_kernel.h
paddle/phi/kernels/sparse/elementwise_grad_kernel.h
+25
-0
paddle/phi/kernels/sparse/elementwise_kernel.h
paddle/phi/kernels/sparse/elementwise_kernel.h
+20
-0
paddle/phi/kernels/sparse/gpu/conv.cu.h
paddle/phi/kernels/sparse/gpu/conv.cu.h
+11
-2
paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu
paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu
+15
-0
paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu
paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu
+14
-0
paddle/phi/ops/compat/sparse_manual_op_sig.cc
paddle/phi/ops/compat/sparse_manual_op_sig.cc
+108
-0
paddle/phi/tests/ops/test_op_signature.h
paddle/phi/tests/ops/test_op_signature.h
+4
-0
python/paddle/fluid/framework.py
python/paddle/fluid/framework.py
+3
-0
python/paddle/fluid/layer_helper_base.py
python/paddle/fluid/layer_helper_base.py
+24
-0
python/paddle/fluid/layers/math_op_patch.py
python/paddle/fluid/layers/math_op_patch.py
+35
-1
python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
+64
-0
python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py
...addle/fluid/tests/unittests/test_sparse_elementwise_op.py
+26
-0
python/paddle/incubate/sparse/binary.py
python/paddle/incubate/sparse/binary.py
+17
-2
python/paddle/incubate/sparse/creation.py
python/paddle/incubate/sparse/creation.py
+54
-38
python/paddle/incubate/sparse/nn/functional/activation.py
python/paddle/incubate/sparse/nn/functional/activation.py
+13
-2
python/paddle/incubate/sparse/nn/functional/conv.py
python/paddle/incubate/sparse/nn/functional/conv.py
+36
-14
未找到文件。
paddle/fluid/framework/CMakeLists.txt
浏览文件 @
10225d22
...
@@ -190,7 +190,7 @@ cc_test(
...
@@ -190,7 +190,7 @@ cc_test(
cc_library
(
cc_library
(
var_type_traits
var_type_traits
SRCS var_type_traits.cc
SRCS var_type_traits.cc
DEPS framework_proto scope tensor_array
)
DEPS framework_proto scope tensor_array
sparse_coo_tensor
)
if
(
WITH_GPU
)
if
(
WITH_GPU
)
target_link_libraries
(
var_type_traits dynload_cuda
)
target_link_libraries
(
var_type_traits dynload_cuda
)
endif
()
endif
()
...
@@ -1138,7 +1138,8 @@ cc_library(
...
@@ -1138,7 +1138,8 @@ cc_library(
phi
phi
phi_api_utils
phi_api_utils
op_info
op_info
shape_inference
)
shape_inference
sparse_coo_tensor
)
cc_test
(
cc_test
(
infershape_utils_test
infershape_utils_test
SRCS infershape_utils_test.cc
SRCS infershape_utils_test.cc
...
...
paddle/fluid/framework/feed_fetch_type.h
浏览文件 @
10225d22
...
@@ -22,10 +22,11 @@ limitations under the License. */
...
@@ -22,10 +22,11 @@ limitations under the License. */
namespace
paddle
{
namespace
paddle
{
namespace
framework
{
namespace
framework
{
using
FeedType
=
paddle
::
variant
<
LoDTensor
,
Strings
>
;
using
FeedType
=
paddle
::
variant
<
LoDTensor
,
Strings
,
phi
::
SparseCooTensor
>
;
using
FeedList
=
std
::
vector
<
FeedType
>
;
using
FeedList
=
std
::
vector
<
FeedType
>
;
using
FetchType
=
paddle
::
variant
<
LoDTensor
,
LoDTensorArray
,
framework
::
Vocab
>
;
using
FetchType
=
paddle
::
variant
<
LoDTensor
,
LoDTensorArray
,
framework
::
Vocab
,
phi
::
SparseCooTensor
>
;
using
FetchList
=
std
::
vector
<
FetchType
>
;
using
FetchList
=
std
::
vector
<
FetchType
>
;
using
FetchUnmergedList
=
std
::
vector
<
std
::
vector
<
FetchType
>>
;
using
FetchUnmergedList
=
std
::
vector
<
std
::
vector
<
FetchType
>>
;
...
@@ -52,6 +53,13 @@ inline bool data_is_string_tensor(const FeedType &data) {
...
@@ -52,6 +53,13 @@ inline bool data_is_string_tensor(const FeedType &data) {
return
false
;
return
false
;
}
}
inline
bool
data_is_sparse_coo_tensor
(
const
FetchType
&
data
)
{
if
(
data
.
type
()
==
typeid
(
phi
::
SparseCooTensor
))
{
return
true
;
}
return
false
;
}
static
const
char
kFeedOpType
[]
=
"feed"
;
static
const
char
kFeedOpType
[]
=
"feed"
;
static
const
char
kFetchOpType
[]
=
"fetch"
;
static
const
char
kFetchOpType
[]
=
"fetch"
;
...
...
paddle/fluid/framework/framework.proto
浏览文件 @
10225d22
...
@@ -154,6 +154,8 @@ message VarType {
...
@@ -154,6 +154,8 @@ message VarType {
FEED_LIST
=
28
;
FEED_LIST
=
28
;
// The data type of phi::StringTensor
// The data type of phi::StringTensor
PSTRING
=
29
;
PSTRING
=
29
;
// the data type of phi::SparseCooTensor
SPARSE_COO
=
30
;
}
}
required
Type
type
=
1
;
required
Type
type
=
1
;
...
@@ -186,6 +188,7 @@ message VarType {
...
@@ -186,6 +188,7 @@ message VarType {
optional
TensorDesc
string
=
8
;
optional
TensorDesc
string
=
8
;
optional
TensorDesc
strings
=
9
;
optional
TensorDesc
strings
=
9
;
optional
TensorDesc
vocab
=
10
;
optional
TensorDesc
vocab
=
10
;
optional
TensorDesc
sparse_coo
=
11
;
}
}
message
VarDesc
{
message
VarDesc
{
...
...
paddle/fluid/framework/infershape_utils.cc
浏览文件 @
10225d22
...
@@ -101,6 +101,11 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext {
...
@@ -101,6 +101,11 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext {
});
});
}
}
bool
IsSparseCooTensorInput
(
const
std
::
string
&
name
)
const
override
{
auto
var_type
=
ctx_
.
GetInputVarType
(
name
);
return
var_type
==
proto
::
VarType
::
SPARSE_COO
;
}
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
{
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
{
auto
var_types
=
ctx_
.
GetOutputsVarType
(
name
);
auto
var_types
=
ctx_
.
GetOutputsVarType
(
name
);
return
std
::
all_of
(
var_types
.
begin
(),
return
std
::
all_of
(
var_types
.
begin
(),
...
@@ -145,6 +150,26 @@ int64_t CompatMetaTensor::numel() const {
...
@@ -145,6 +150,26 @@ int64_t CompatMetaTensor::numel() const {
}
}
}
}
bool
CompatMetaTensor
::
is_dense
()
const
{
if
(
is_runtime_
)
{
auto
*
var
=
PADDLE_GET_CONST
(
Variable
*
,
var_
);
return
var
->
IsType
<
phi
::
DenseTensor
>
();
}
else
{
auto
*
var
=
PADDLE_GET_CONST
(
VarDesc
*
,
var_
);
return
var
->
GetType
()
==
proto
::
VarType
::
LOD_TENSOR
;
}
}
bool
CompatMetaTensor
::
is_tensor_array
()
const
{
if
(
is_runtime_
)
{
auto
*
var
=
PADDLE_GET_CONST
(
Variable
*
,
var_
);
return
var
->
IsType
<
framework
::
LoDTensorArray
>
();
}
else
{
auto
*
var
=
PADDLE_GET_CONST
(
VarDesc
*
,
var_
);
return
var
->
GetType
()
==
proto
::
VarType
::
LOD_TENSOR_ARRAY
;
}
}
DDim
CompatMetaTensor
::
dims
()
const
{
DDim
CompatMetaTensor
::
dims
()
const
{
ValidCheck
(
*
this
);
ValidCheck
(
*
this
);
if
(
is_runtime_
)
{
if
(
is_runtime_
)
{
...
@@ -153,6 +178,8 @@ DDim CompatMetaTensor::dims() const {
...
@@ -153,6 +178,8 @@ DDim CompatMetaTensor::dims() const {
return
var
->
Get
<
phi
::
DenseTensor
>
().
dims
();
return
var
->
Get
<
phi
::
DenseTensor
>
().
dims
();
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
return
var
->
Get
<
phi
::
SelectedRows
>
().
dims
();
return
var
->
Get
<
phi
::
SelectedRows
>
().
dims
();
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
return
var
->
Get
<
phi
::
SparseCooTensor
>
().
dims
();
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
// use tensor array size as dims
// use tensor array size as dims
auto
&
tensor_array
=
var
->
Get
<
framework
::
LoDTensorArray
>
();
auto
&
tensor_array
=
var
->
Get
<
framework
::
LoDTensorArray
>
();
...
@@ -178,6 +205,8 @@ phi::DataType CompatMetaTensor::dtype() const {
...
@@ -178,6 +205,8 @@ phi::DataType CompatMetaTensor::dtype() const {
return
var
->
Get
<
phi
::
DenseTensor
>
().
dtype
();
return
var
->
Get
<
phi
::
DenseTensor
>
().
dtype
();
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
return
var
->
Get
<
phi
::
SelectedRows
>
().
dtype
();
return
var
->
Get
<
phi
::
SelectedRows
>
().
dtype
();
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
return
var
->
Get
<
phi
::
SparseCooTensor
>
().
dtype
();
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
// NOTE(chenweihang): do nothing
// NOTE(chenweihang): do nothing
// Unsupported get dtype from LoDTensorArray now
// Unsupported get dtype from LoDTensorArray now
...
@@ -200,6 +229,8 @@ DataLayout CompatMetaTensor::layout() const {
...
@@ -200,6 +229,8 @@ DataLayout CompatMetaTensor::layout() const {
return
var
->
Get
<
phi
::
DenseTensor
>
().
layout
();
return
var
->
Get
<
phi
::
DenseTensor
>
().
layout
();
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
return
var
->
Get
<
phi
::
SelectedRows
>
().
layout
();
return
var
->
Get
<
phi
::
SelectedRows
>
().
layout
();
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
return
var
->
Get
<
phi
::
SparseCooTensor
>
().
layout
();
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
// NOTE(chenweihang): do nothing
// NOTE(chenweihang): do nothing
// Unsupported get layout from LoDTensorArray now
// Unsupported get layout from LoDTensorArray now
...
@@ -226,6 +257,9 @@ void CompatMetaTensor::set_dims(const DDim& dims) {
...
@@ -226,6 +257,9 @@ void CompatMetaTensor::set_dims(const DDim& dims) {
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SelectedRows
>
()
->
mutable_value
();
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SelectedRows
>
()
->
mutable_value
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
dims
=
dims
;
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
dims
=
dims
;
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SparseCooTensor
>
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
dims
=
dims
;
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
auto
*
tensor_array
=
var
->
GetMutable
<
framework
::
LoDTensorArray
>
();
auto
*
tensor_array
=
var
->
GetMutable
<
framework
::
LoDTensorArray
>
();
// Note: Here I want enforce `tensor_array->size() == 0UL`, because
// Note: Here I want enforce `tensor_array->size() == 0UL`, because
...
@@ -257,6 +291,9 @@ void CompatMetaTensor::set_dtype(phi::DataType dtype) {
...
@@ -257,6 +291,9 @@ void CompatMetaTensor::set_dtype(phi::DataType dtype) {
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SelectedRows
>
()
->
mutable_value
();
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SelectedRows
>
()
->
mutable_value
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
dtype
=
dtype
;
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
dtype
=
dtype
;
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SparseCooTensor
>
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
dtype
=
dtype
;
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
// NOTE(chenweihang): do nothing
// NOTE(chenweihang): do nothing
// Unsupported set dtype for LoDTensorArray now
// Unsupported set dtype for LoDTensorArray now
...
@@ -280,6 +317,9 @@ void CompatMetaTensor::set_layout(DataLayout layout) {
...
@@ -280,6 +317,9 @@ void CompatMetaTensor::set_layout(DataLayout layout) {
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SelectedRows
>
()
->
mutable_value
();
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SelectedRows
>
()
->
mutable_value
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
layout
=
layout
;
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
layout
=
layout
;
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
SparseCooTensor
>
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
layout
=
layout
;
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
// NOTE(chenweihang): do nothing
// NOTE(chenweihang): do nothing
// Unsupported set dtype for LoDTensorArray now
// Unsupported set dtype for LoDTensorArray now
...
@@ -299,7 +339,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
...
@@ -299,7 +339,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
ValidCheck
(
meta_tensor
);
ValidCheck
(
meta_tensor
);
if
(
is_runtime_
)
{
if
(
is_runtime_
)
{
auto
*
var
=
PADDLE_GET
(
Variable
*
,
var_
);
auto
*
var
=
PADDLE_GET
(
Variable
*
,
var_
);
if
(
var
->
IsType
<
phi
::
DenseTensor
>
())
{
if
(
var
->
IsType
<
phi
::
DenseTensor
>
()
&&
meta_tensor
.
is_dense
()
)
{
auto
*
tensor
=
var
->
GetMutable
<
phi
::
DenseTensor
>
();
auto
*
tensor
=
var
->
GetMutable
<
phi
::
DenseTensor
>
();
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
lod
=
phi
::
DenseTensorUtils
::
GetMutableMeta
(
tensor
)
->
lod
=
static_cast
<
const
CompatMetaTensor
&>
(
meta_tensor
).
GetRuntimeLoD
();
static_cast
<
const
CompatMetaTensor
&>
(
meta_tensor
).
GetRuntimeLoD
();
...
@@ -309,6 +349,10 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
...
@@ -309,6 +349,10 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
}
}
}
else
{
}
else
{
auto
*
var
=
PADDLE_GET
(
VarDesc
*
,
var_
);
auto
*
var
=
PADDLE_GET
(
VarDesc
*
,
var_
);
if
(
!
meta_tensor
.
is_dense
()
&&
!
meta_tensor
.
is_tensor_array
())
{
VLOG
(
3
)
<<
"input metatensor is not LoDTensor or LoDTensorArray."
;
return
;
}
var
->
SetLoDLevel
(
var
->
SetLoDLevel
(
static_cast
<
const
CompatMetaTensor
&>
(
meta_tensor
).
GetCompileTimeLoD
());
static_cast
<
const
CompatMetaTensor
&>
(
meta_tensor
).
GetCompileTimeLoD
());
}
}
...
...
paddle/fluid/framework/infershape_utils.h
浏览文件 @
10225d22
...
@@ -59,6 +59,9 @@ class CompatMetaTensor : public phi::MetaTensor {
...
@@ -59,6 +59,9 @@ class CompatMetaTensor : public phi::MetaTensor {
bool
initialized
()
const
override
{
return
initialized_
;
};
bool
initialized
()
const
override
{
return
initialized_
;
};
bool
is_tensor_array
()
const
;
bool
is_dense
()
const
;
operator
unspecified_bool_type
()
const
override
{
operator
unspecified_bool_type
()
const
override
{
return
initialized_
?
unspecified_bool_true
:
0
;
return
initialized_
?
unspecified_bool_true
:
0
;
}
}
...
...
paddle/fluid/framework/operator.cc
浏览文件 @
10225d22
...
@@ -2382,6 +2382,17 @@ void OperatorWithKernel::ParseInputDataType(
...
@@ -2382,6 +2382,17 @@ void OperatorWithKernel::ParseInputDataType(
t
=
&
var
->
Get
<
LoDTensor
>
();
t
=
&
var
->
Get
<
LoDTensor
>
();
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
t
=
&
(
var
->
Get
<
phi
::
SelectedRows
>
().
value
());
t
=
&
(
var
->
Get
<
phi
::
SelectedRows
>
().
value
());
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
const
phi
::
SparseCooTensor
*
sp_t
=
&
(
var
->
Get
<
phi
::
SparseCooTensor
>
());
PADDLE_ENFORCE_EQ
(
sp_t
->
initialized
(),
true
,
platform
::
errors
::
InvalidArgument
(
"The %s Op's Input Variable `%s` "
"contains uninitialized Tensor."
,
Type
(),
name
));
*
data_type
=
paddle
::
framework
::
TransToProtoVarType
(
sp_t
->
dtype
());
return
;
}
else
if
(
var
->
IsType
<
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
LoDTensorArray
>
())
{
auto
t_arr
=
&
var
->
Get
<
LoDTensorArray
>
();
auto
t_arr
=
&
var
->
Get
<
LoDTensorArray
>
();
for
(
size_t
j
=
0
;
j
<
t_arr
->
size
();
j
++
)
{
for
(
size_t
j
=
0
;
j
<
t_arr
->
size
();
j
++
)
{
...
@@ -2419,6 +2430,29 @@ void OperatorWithKernel::ParseMultiInputDataType(
...
@@ -2419,6 +2430,29 @@ void OperatorWithKernel::ParseMultiInputDataType(
t
=
&
var
->
Get
<
LoDTensor
>
();
t
=
&
var
->
Get
<
LoDTensor
>
();
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
t
=
&
(
var
->
Get
<
phi
::
SelectedRows
>
().
value
());
t
=
&
(
var
->
Get
<
phi
::
SelectedRows
>
().
value
());
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
const
phi
::
SparseCooTensor
*
sp_t
=
&
(
var
->
Get
<
phi
::
SparseCooTensor
>
());
PADDLE_ENFORCE_EQ
(
sp_t
->
initialized
(),
true
,
platform
::
errors
::
InvalidArgument
(
"The %s Op's Input Variable `%s` "
"contains uninitialized Tensor."
,
Type
(),
name
));
proto
::
VarType
::
Type
tmp
=
paddle
::
framework
::
TransToProtoVarType
(
sp_t
->
dtype
());
PADDLE_ENFORCE
(
tmp
==
*
data_type
||
*
data_type
==
default_data_type
,
platform
::
errors
::
InvalidArgument
(
"The DataType of %s Op's duplicable or different "
"slot Variable %s must be "
"consistent or reigster GetExpectedKernelType. The "
"current variable type is (%s), but the "
"previous variable type is (%s)."
,
Type
(),
name
,
DataTypeToString
(
tmp
),
DataTypeToString
(
*
data_type
)));
*
data_type
=
tmp
;
}
else
if
(
var
->
IsType
<
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
LoDTensorArray
>
())
{
auto
t_arr
=
&
var
->
Get
<
LoDTensorArray
>
();
auto
t_arr
=
&
var
->
Get
<
LoDTensorArray
>
();
for
(
size_t
j
=
0
;
j
<
t_arr
->
size
();
j
++
)
{
for
(
size_t
j
=
0
;
j
<
t_arr
->
size
();
j
++
)
{
...
@@ -2663,6 +2697,9 @@ void OperatorWithKernel::BuildPhiKernelContext(
...
@@ -2663,6 +2697,9 @@ void OperatorWithKernel::BuildPhiKernelContext(
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
}
else
if
(
var
->
IsType
<
phi
::
SelectedRows
>
())
{
tensor_in
=
&
(
var
->
Get
<
phi
::
SelectedRows
>
());
tensor_in
=
&
(
var
->
Get
<
phi
::
SelectedRows
>
());
phi_kernel_context
->
EmplaceBackInputWithoutSetRange
(
tensor_in
);
phi_kernel_context
->
EmplaceBackInputWithoutSetRange
(
tensor_in
);
}
else
if
(
var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
tensor_in
=
&
(
var
->
Get
<
phi
::
SparseCooTensor
>
());
phi_kernel_context
->
EmplaceBackInputWithoutSetRange
(
tensor_in
);
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
need_prepare_phi_data_
=
true
;
need_prepare_phi_data_
=
true
;
tensor_in
=
&
(
var
->
Get
<
framework
::
LoDTensorArray
>
());
tensor_in
=
&
(
var
->
Get
<
framework
::
LoDTensorArray
>
());
...
@@ -2708,6 +2745,9 @@ void OperatorWithKernel::BuildPhiKernelContext(
...
@@ -2708,6 +2745,9 @@ void OperatorWithKernel::BuildPhiKernelContext(
}
else
if
(
var
->
template
IsType
<
phi
::
SelectedRows
>())
{
}
else
if
(
var
->
template
IsType
<
phi
::
SelectedRows
>())
{
tensor_out
=
var
->
template
GetMutable
<
phi
::
SelectedRows
>();
tensor_out
=
var
->
template
GetMutable
<
phi
::
SelectedRows
>();
phi_kernel_context
->
EmplaceBackOutputWithoutSetRange
(
tensor_out
);
phi_kernel_context
->
EmplaceBackOutputWithoutSetRange
(
tensor_out
);
}
else
if
(
var
->
template
IsType
<
phi
::
SparseCooTensor
>())
{
tensor_out
=
var
->
template
GetMutable
<
phi
::
SparseCooTensor
>();
phi_kernel_context
->
EmplaceBackOutputWithoutSetRange
(
tensor_out
);
}
else
if
(
var
->
template
IsType
<
framework
::
LoDTensorArray
>())
{
}
else
if
(
var
->
template
IsType
<
framework
::
LoDTensorArray
>())
{
tensor_out
=
var
->
template
GetMutable
<
framework
::
LoDTensorArray
>();
tensor_out
=
var
->
template
GetMutable
<
framework
::
LoDTensorArray
>();
// Note: If the input LoDTensorArray size is 0, the output
// Note: If the input LoDTensorArray size is 0, the output
...
...
paddle/fluid/framework/operator.h
浏览文件 @
10225d22
...
@@ -524,6 +524,11 @@ class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext {
...
@@ -524,6 +524,11 @@ class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext {
});
});
}
}
bool
IsSparseCooTensorInput
(
const
std
::
string
&
name
)
const
override
{
const
auto
*
var
=
ctx_
.
InputVar
(
name
);
return
var
->
IsType
<
phi
::
SparseCooTensor
>
();
}
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
{
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
{
auto
vars
=
ctx_
.
MultiOutputVar
(
name
);
auto
vars
=
ctx_
.
MultiOutputVar
(
name
);
return
std
::
all_of
(
vars
.
begin
(),
vars
.
end
(),
[](
const
Variable
*
var
)
{
return
std
::
all_of
(
vars
.
begin
(),
vars
.
end
(),
[](
const
Variable
*
var
)
{
...
...
paddle/fluid/framework/tensor.h
浏览文件 @
10225d22
...
@@ -17,6 +17,7 @@ limitations under the License. */
...
@@ -17,6 +17,7 @@ limitations under the License. */
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
namespace
paddle
{
namespace
paddle
{
namespace
framework
{
namespace
framework
{
...
...
paddle/fluid/framework/var_desc.cc
浏览文件 @
10225d22
...
@@ -237,6 +237,8 @@ const proto::VarType::TensorDesc &VarDesc::tensor_desc() const {
...
@@ -237,6 +237,8 @@ const proto::VarType::TensorDesc &VarDesc::tensor_desc() const {
return
desc_
.
type
().
strings
();
return
desc_
.
type
().
strings
();
case
proto
::
VarType
::
VOCAB
:
case
proto
::
VarType
::
VOCAB
:
return
desc_
.
type
().
vocab
();
return
desc_
.
type
().
vocab
();
case
proto
::
VarType
::
SPARSE_COO
:
return
desc_
.
type
().
sparse_coo
();
default:
default:
PADDLE_THROW
(
platform
::
errors
::
Unavailable
(
PADDLE_THROW
(
platform
::
errors
::
Unavailable
(
"Getting 'tensor_desc' is not supported by the %s type variable."
,
"Getting 'tensor_desc' is not supported by the %s type variable."
,
...
@@ -284,6 +286,8 @@ proto::VarType::TensorDesc *VarDesc::mutable_tensor_desc() {
...
@@ -284,6 +286,8 @@ proto::VarType::TensorDesc *VarDesc::mutable_tensor_desc() {
return
desc_
.
mutable_type
()
->
mutable_strings
();
return
desc_
.
mutable_type
()
->
mutable_strings
();
case
proto
::
VarType
::
VOCAB
:
case
proto
::
VarType
::
VOCAB
:
return
desc_
.
mutable_type
()
->
mutable_vocab
();
return
desc_
.
mutable_type
()
->
mutable_vocab
();
case
proto
::
VarType
::
SPARSE_COO
:
return
desc_
.
mutable_type
()
->
mutable_sparse_coo
();
default:
default:
PADDLE_THROW
(
PADDLE_THROW
(
platform
::
errors
::
Unavailable
(
"Getting 'mutable_tensor_desc' is not "
platform
::
errors
::
Unavailable
(
"Getting 'mutable_tensor_desc' is not "
...
...
paddle/fluid/framework/var_type.h
浏览文件 @
10225d22
...
@@ -33,6 +33,7 @@ inline proto::VarType::Type ToVarType(int type) {
...
@@ -33,6 +33,7 @@ inline proto::VarType::Type ToVarType(int type) {
switch
(
type
)
{
switch
(
type
)
{
case
proto
::
VarType
::
LOD_TENSOR
:
case
proto
::
VarType
::
LOD_TENSOR
:
case
proto
::
VarType
::
SELECTED_ROWS
:
case
proto
::
VarType
::
SELECTED_ROWS
:
case
proto
::
VarType
::
SPARSE_COO
:
case
proto
::
VarType
::
LOD_RANK_TABLE
:
case
proto
::
VarType
::
LOD_RANK_TABLE
:
case
proto
::
VarType
::
LOD_TENSOR_ARRAY
:
case
proto
::
VarType
::
LOD_TENSOR_ARRAY
:
case
proto
::
VarType
::
FETCH_LIST
:
case
proto
::
VarType
::
FETCH_LIST
:
...
@@ -59,6 +60,9 @@ inline void VisitVarType(const framework::Variable& var, Visitor visitor) {
...
@@ -59,6 +60,9 @@ inline void VisitVarType(const framework::Variable& var, Visitor visitor) {
case
proto
::
VarType
::
SELECTED_ROWS
:
case
proto
::
VarType
::
SELECTED_ROWS
:
visitor
(
var
.
Get
<
phi
::
SelectedRows
>
());
visitor
(
var
.
Get
<
phi
::
SelectedRows
>
());
return
;
return
;
case
proto
::
VarType
::
SPARSE_COO
:
visitor
(
var
.
Get
<
phi
::
SparseCooTensor
>
());
return
;
case
proto
::
VarType
::
READER
:
case
proto
::
VarType
::
READER
:
visitor
(
var
.
Get
<
ReaderHolder
>
());
visitor
(
var
.
Get
<
ReaderHolder
>
());
return
;
return
;
...
...
paddle/fluid/framework/var_type_traits.h
浏览文件 @
10225d22
...
@@ -54,6 +54,7 @@
...
@@ -54,6 +54,7 @@
namespace
phi
{
namespace
phi
{
class
DenseTensor
;
class
DenseTensor
;
class
SelectedRows
;
class
SelectedRows
;
class
SparseCooTensor
;
}
// namespace phi
}
// namespace phi
// Users should add forward declarations here
// Users should add forward declarations here
...
@@ -180,6 +181,7 @@ struct VarTypeRegistryImpl {
...
@@ -180,6 +181,7 @@ struct VarTypeRegistryImpl {
using
VarTypeRegistry
=
detail
::
VarTypeRegistryImpl
<
using
VarTypeRegistry
=
detail
::
VarTypeRegistryImpl
<
Tensor
,
Tensor
,
phi
::
SelectedRows
,
phi
::
SelectedRows
,
phi
::
SparseCooTensor
,
std
::
vector
<
Scope
*>
,
std
::
vector
<
Scope
*>
,
LoDRankTable
,
LoDRankTable
,
Strings
,
Strings
,
...
@@ -252,6 +254,7 @@ REG_PROTO_VAR_TYPE_TRAIT(float, proto::VarType::FP32);
...
@@ -252,6 +254,7 @@ REG_PROTO_VAR_TYPE_TRAIT(float, proto::VarType::FP32);
REG_PROTO_VAR_TYPE_TRAIT
(
Vocab
,
proto
::
VarType
::
VOCAB
);
REG_PROTO_VAR_TYPE_TRAIT
(
Vocab
,
proto
::
VarType
::
VOCAB
);
REG_PROTO_VAR_TYPE_TRAIT
(
String
,
proto
::
VarType
::
STRING
);
REG_PROTO_VAR_TYPE_TRAIT
(
String
,
proto
::
VarType
::
STRING
);
REG_PROTO_VAR_TYPE_TRAIT
(
Strings
,
proto
::
VarType
::
STRINGS
);
REG_PROTO_VAR_TYPE_TRAIT
(
Strings
,
proto
::
VarType
::
STRINGS
);
REG_PROTO_VAR_TYPE_TRAIT
(
phi
::
SparseCooTensor
,
proto
::
VarType
::
SPARSE_COO
);
/** End of variable type registration */
/** End of variable type registration */
...
...
paddle/fluid/framework/variable_helper.cc
浏览文件 @
10225d22
...
@@ -52,6 +52,8 @@ void InitializeVariable(Variable *var, proto::VarType::Type var_type) {
...
@@ -52,6 +52,8 @@ void InitializeVariable(Variable *var, proto::VarType::Type var_type) {
var
->
GetMutable
<
ReaderHolder
>
();
var
->
GetMutable
<
ReaderHolder
>
();
}
else
if
(
var_type
==
proto
::
VarType
::
RAW
)
{
}
else
if
(
var_type
==
proto
::
VarType
::
RAW
)
{
// GetMutable will be called in operator
// GetMutable will be called in operator
}
else
if
(
var_type
==
proto
::
VarType
::
SPARSE_COO
)
{
var
->
GetMutable
<
phi
::
SparseCooTensor
>
();
}
else
{
}
else
{
PADDLE_THROW
(
platform
::
errors
::
Unavailable
(
PADDLE_THROW
(
platform
::
errors
::
Unavailable
(
"Variable type %d is not in "
"Variable type %d is not in "
...
...
paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
浏览文件 @
10225d22
...
@@ -104,6 +104,10 @@ bool PluginArgumentMappingContext::IsSelectedRowsInput(
...
@@ -104,6 +104,10 @@ bool PluginArgumentMappingContext::IsSelectedRowsInput(
const
std
::
string
&
name
)
const
{
const
std
::
string
&
name
)
const
{
return
false
;
return
false
;
}
}
bool
PluginArgumentMappingContext
::
IsSparseCooTensorInput
(
const
std
::
string
&
name
)
const
{
return
false
;
}
bool
PluginArgumentMappingContext
::
IsDenseTensorVectorInput
(
bool
PluginArgumentMappingContext
::
IsDenseTensorVectorInput
(
const
std
::
string
&
name
)
const
{
const
std
::
string
&
name
)
const
{
return
false
;
return
false
;
...
...
paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
浏览文件 @
10225d22
...
@@ -46,6 +46,8 @@ class PluginArgumentMappingContext : public ::phi::ArgumentMappingContext {
...
@@ -46,6 +46,8 @@ class PluginArgumentMappingContext : public ::phi::ArgumentMappingContext {
bool
IsSelectedRowsInput
(
const
std
::
string
&
name
)
const
override
;
bool
IsSelectedRowsInput
(
const
std
::
string
&
name
)
const
override
;
bool
IsSparseCooTensorInput
(
const
std
::
string
&
name
)
const
override
;
bool
IsDenseTensorVectorInput
(
const
std
::
string
&
name
)
const
override
;
bool
IsDenseTensorVectorInput
(
const
std
::
string
&
name
)
const
override
;
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
;
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
;
...
...
paddle/fluid/operators/controlflow/feed_op.cc
浏览文件 @
10225d22
...
@@ -11,6 +11,7 @@ limitations under the License. */
...
@@ -11,6 +11,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/core/tensor_utils.h"
namespace
paddle
{
namespace
paddle
{
namespace
framework
{
namespace
framework
{
...
@@ -61,6 +62,22 @@ class FeedVariableVisitor {
...
@@ -61,6 +62,22 @@ class FeedVariableVisitor {
*
out_str
=
in_str
;
*
out_str
=
in_str
;
}
}
void
operator
()(
const
phi
::
SparseCooTensor
&
in_tensor
)
const
{
phi
::
SparseCooTensor
*
out_tensor
=
out_var_
->
GetMutable
<
phi
::
SparseCooTensor
>
();
if
(
platform
::
is_same_place
(
in_tensor
.
place
(),
place_
))
{
*
out_tensor
=
in_tensor
;
}
else
{
platform
::
DeviceContext
*
context
=
platform
::
DeviceContextPool
::
Instance
().
Get
(
place_
);
phi
::
DenseTensor
indices
,
values
;
framework
::
TensorCopy
(
in_tensor
.
indices
(),
place_
,
*
context
,
&
indices
);
framework
::
TensorCopy
(
in_tensor
.
values
(),
place_
,
*
context
,
&
values
);
out_tensor
->
SetMember
(
indices
,
values
,
in_tensor
.
meta
());
}
}
private:
private:
framework
::
Variable
*
out_var_
;
framework
::
Variable
*
out_var_
;
const
platform
::
Place
&
place_
;
const
platform
::
Place
&
place_
;
...
...
paddle/fluid/operators/controlflow/fetch_op.cc
浏览文件 @
10225d22
...
@@ -123,6 +123,9 @@ class FetchOp : public framework::OperatorBase {
...
@@ -123,6 +123,9 @@ class FetchOp : public framework::OperatorBase {
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
Vocab
>
();
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
Vocab
>
();
auto
*
dst_item
=
&
(
PADDLE_GET
(
framework
::
Vocab
,
fetch_list
->
at
(
col
)));
auto
*
dst_item
=
&
(
PADDLE_GET
(
framework
::
Vocab
,
fetch_list
->
at
(
col
)));
*
dst_item
=
src_item
;
*
dst_item
=
src_item
;
}
else
if
(
fetch_var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
auto
&
src_item
=
fetch_var
->
Get
<
phi
::
SparseCooTensor
>
();
fetch_list
->
at
(
col
)
=
src_item
;
}
else
{
}
else
{
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
LoDTensorArray
>
();
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
LoDTensorArray
>
();
framework
::
LoDTensorArray
tmp
(
src_item
.
size
());
framework
::
LoDTensorArray
tmp
(
src_item
.
size
());
...
...
paddle/fluid/operators/controlflow/fetch_v2_op.cc
浏览文件 @
10225d22
...
@@ -98,6 +98,12 @@ class FetchV2Op : public framework::OperatorWithKernel {
...
@@ -98,6 +98,12 @@ class FetchV2Op : public framework::OperatorWithKernel {
return
framework
::
OpKernelType
(
framework
::
proto
::
VarType
::
FP32
,
return
framework
::
OpKernelType
(
framework
::
proto
::
VarType
::
FP32
,
platform
::
CPUPlace
());
platform
::
CPUPlace
());
}
}
}
else
if
(
fetch_var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
auto
&
src_item
=
fetch_var
->
Get
<
phi
::
SparseCooTensor
>
();
if
(
!
src_item
.
initialized
())
{
return
framework
::
OpKernelType
(
framework
::
proto
::
VarType
::
FP32
,
platform
::
CPUPlace
());
}
}
else
{
}
else
{
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
LoDTensorArray
>
();
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
LoDTensorArray
>
();
if
(
src_item
.
empty
()
||
!
src_item
[
0
].
IsInitialized
())
{
if
(
src_item
.
empty
()
||
!
src_item
[
0
].
IsInitialized
())
{
...
@@ -163,6 +169,12 @@ class FetchV2Kernel {
...
@@ -163,6 +169,12 @@ class FetchV2Kernel {
dst_item
->
ShareDataWith
(
src_item
);
dst_item
->
ShareDataWith
(
src_item
);
dst_item
->
set_lod
(
src_item
.
lod
());
dst_item
->
set_lod
(
src_item
.
lod
());
}
}
}
else
if
(
fetch_var
->
IsType
<
phi
::
SparseCooTensor
>
())
{
auto
&
src_item
=
fetch_var
->
Get
<
phi
::
SparseCooTensor
>
();
if
(
!
src_item
.
initialized
())
{
return
;
}
fetch_list
->
at
(
col
)
=
src_item
;
}
else
{
}
else
{
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
LoDTensorArray
>
();
auto
&
src_item
=
fetch_var
->
Get
<
framework
::
LoDTensorArray
>
();
framework
::
LoDTensorArray
tmp
(
src_item
.
size
());
framework
::
LoDTensorArray
tmp
(
src_item
.
size
());
...
...
paddle/fluid/operators/sparse_manual_op.cc
0 → 100644
浏览文件 @
10225d22
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/infermeta/sparse/binary.h"
#include "paddle/phi/infermeta/sparse/unary.h"
#include "paddle/phi/infermeta/unary.h"
namespace
paddle
{
namespace
operators
{
// Proto maker for the static-graph sparse_sparse_coo_tensor op:
// inputs `values`/`indices`, output `out`, attribute `dense_shape`.
class SparseSparseCooTensorOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("values", "(Tensor), input 0 of sparse_coo_tensor op.");
    AddInput("indices", "(Tensor), input 1 of sparse_coo_tensor op.");
    AddOutput("out", "(Tensor), output 0 of sparse_coo_tensor op.");
    AddAttr<std::vector<int>>(
        "dense_shape", "(vector<int>), attribute 0 for sparse_coo_tensor op.");
    AddComment(R"DOC(
TODO: Documentation of sparse_coo_tensor op.
)DOC");
  }
};

// Operator shell; shape/dtype inference is delegated to the phi
// infer-meta functor declared below.
class SparseSparseCooTensorOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(
    sparse_sparse_coo_tensor,
    SparseSparseCooTensorInferShapeFunctor,
    PD_INFER_META(phi::sparse::SparseCooTensorInferMeta));
// Proto maker for the sparse_values op (single input `x`, single output `out`).
class SparseValuesOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_values op.");
    AddOutput("out", "(Tensor), output 0 of sparse_values op.");
    AddComment(R"DOC(
TODO: Documentation of sparse_values op.
)DOC");
  }
};

// Operator shell; inference is delegated to phi::sparse::ValuesInferMeta.
class SparseValuesOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_values,
                            SparseValuesInferShapeFunctor,
                            PD_INFER_META(phi::sparse::ValuesInferMeta));
// Proto maker for the sparse_indices op (single input `x`, single output `out`).
class SparseIndicesOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_indices op.");
    AddOutput("out", "(Tensor), output 0 of sparse_indices op.");
    AddComment(R"DOC(
TODO: Documentation of sparse_indices op.
)DOC");
  }
};

// Operator shell; inference is delegated to phi::sparse::IndicesInferMeta.
class SparseIndicesOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_indices,
                            SparseIndicesInferShapeFunctor,
                            PD_INFER_META(phi::sparse::IndicesInferMeta));
// Proto maker for the sparse_to_dense op (single input `x`, single output `out`).
class SparseToDenseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_to_dense op.");
    AddOutput("out", "(Tensor), output 0 of sparse_to_dense op.");
    AddComment(R"DOC(
TODO: Documentation of sparse_to_dense op.
)DOC");
  }
};

// Operator shell; output meta mirrors the input (UnchangedInferMeta).
class SparseToDenseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_to_dense,
                            SparseToDenseInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
// Proto maker for the sparse_relu op (single input `x`, single output `out`).
class SparseReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_relu op.");
    AddOutput("out", "(Tensor), output 0 of sparse_relu op.");
    AddComment(R"DOC(
TODO: Documentation of sparse_relu op.
)DOC");
  }
};

// Operator shell; output meta mirrors the input (UnchangedInferMeta).
class SparseReluOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_relu,
                            SparseReluInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
// Proto maker for the sparse_conv3d op: feature input `x`, filter input
// `kernel`; outputs `out` plus the auxiliary `rulebook`/`counter` tensors,
// with the usual convolution attributes.
class SparseConv3dOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_conv3d op.");
    AddInput("kernel", "(Tensor), input 1 of sparse_conv3d op.");
    AddOutput("out", "(Tensor), output 0 of sparse_conv3d op.");
    AddOutput("rulebook", "(Tensor), output 1 of sparse_conv3d op.");
    AddOutput("counter", "(Tensor), output 2 of sparse_conv3d op.");
    AddAttr<std::vector<int>>(
        "paddings", "(vector<int>), attribute 0 for sparse_conv3d op.");
    AddAttr<std::vector<int>>(
        "dilations", "(vector<int>), attribute 1 for sparse_conv3d op.");
    AddAttr<std::vector<int>>(
        "strides", "(vector<int>), attribute 2 for sparse_conv3d op.");
    AddAttr<int>("groups", "(int), attribute 3 for sparse_conv3d op.");
    // Fix: description previously said "conv3d_coo op.", inconsistent with
    // every other attribute description of this op.
    AddAttr<bool>("subm", "(bool), attribute 4 for sparse_conv3d op.");
    AddAttr<std::string>("key", "(string), attribute 5 for sparse_conv3d op.")
        .SetDefault("");
    AddComment(R"DOC(
TODO: Documentation of sparse_conv3d op.
)DOC");
  }
};

// Operator shell; inference is delegated to phi::sparse::Conv3dInferMeta.
class SparseConv3dOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_conv3d,
                            SparseConv3dInferShapeFunctor,
                            PD_INFER_META(phi::sparse::Conv3dInferMeta));
// Proto maker for the sparse_add op (inputs `x`/`y`, output `out`).
class SparseAddOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_add op.");
    AddInput("y", "(Tensor), input 1 of sparse_add op.");
    AddOutput("out", "(Tensor), output 0 of sparse_add op.");
    AddComment(R"DOC(
TODO: Documentation of sparse_add op.
)DOC");
  }
};

// Operator shell; output meta mirrors the first input (UnchangedInferMeta).
class SparseAddOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_add,
                            SparseAddInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
}
// namespace operators
}
// namespace paddle
namespace ops = paddle::operators;

// Register every manually-written sparse static-graph operator together with
// its proto maker and infer-shape functor.
REGISTER_OPERATOR(sparse_sparse_coo_tensor,
                  ops::SparseSparseCooTensorOp,
                  ops::SparseSparseCooTensorOpMaker,
                  ops::SparseSparseCooTensorInferShapeFunctor);

REGISTER_OPERATOR(sparse_values,
                  ops::SparseValuesOp,
                  ops::SparseValuesOpMaker,
                  ops::SparseValuesInferShapeFunctor);

REGISTER_OPERATOR(sparse_indices,
                  ops::SparseIndicesOp,
                  ops::SparseIndicesOpMaker,
                  ops::SparseIndicesInferShapeFunctor);

REGISTER_OPERATOR(sparse_to_dense,
                  ops::SparseToDenseOp,
                  ops::SparseToDenseOpMaker,
                  ops::SparseToDenseInferShapeFunctor);

REGISTER_OPERATOR(sparse_relu,
                  ops::SparseReluOp,
                  ops::SparseReluOpMaker,
                  ops::SparseReluInferShapeFunctor);

REGISTER_OPERATOR(sparse_conv3d,
                  ops::SparseConv3dOp,
                  ops::SparseConv3dOpMaker,
                  ops::SparseConv3dInferShapeFunctor);

REGISTER_OPERATOR(sparse_add,
                  ops::SparseAddOp,
                  ops::SparseAddOpMaker,
                  ops::SparseAddInferShapeFunctor);
paddle/fluid/pybind/protobuf.cc
浏览文件 @
10225d22
...
@@ -275,7 +275,8 @@ void BindVarDsec(pybind11::module *m) {
...
@@ -275,7 +275,8 @@ void BindVarDsec(pybind11::module *m) {
.
value
(
"RAW"
,
pd
::
proto
::
VarType
::
RAW
)
.
value
(
"RAW"
,
pd
::
proto
::
VarType
::
RAW
)
.
value
(
"STRING"
,
pd
::
proto
::
VarType
::
STRING
)
.
value
(
"STRING"
,
pd
::
proto
::
VarType
::
STRING
)
.
value
(
"STRINGS"
,
pd
::
proto
::
VarType
::
STRINGS
)
.
value
(
"STRINGS"
,
pd
::
proto
::
VarType
::
STRINGS
)
.
value
(
"VOCAB"
,
pd
::
proto
::
VarType
::
VOCAB
);
.
value
(
"VOCAB"
,
pd
::
proto
::
VarType
::
VOCAB
)
.
value
(
"SPARSE_COO"
,
pd
::
proto
::
VarType
::
SPARSE_COO
);
}
}
void
BindOpDesc
(
pybind11
::
module
*
m
)
{
void
BindOpDesc
(
pybind11
::
module
*
m
)
{
...
...
paddle/fluid/pybind/pybind.cc
浏览文件 @
10225d22
...
@@ -1918,6 +1918,9 @@ All parameter, weight, gradient are variables in Paddle.
...
@@ -1918,6 +1918,9 @@ All parameter, weight, gradient are variables in Paddle.
if
(
data_is_lod_tensor
(
self
[
i
]))
{
if
(
data_is_lod_tensor
(
self
[
i
]))
{
auto
&
data
=
PADDLE_GET
(
LoDTensor
,
self
[
i
]);
auto
&
data
=
PADDLE_GET
(
LoDTensor
,
self
[
i
]);
res
[
i
]
=
py
::
cast
(
std
::
move
(
data
));
res
[
i
]
=
py
::
cast
(
std
::
move
(
data
));
}
else
if
(
data_is_sparse_coo_tensor
(
self
[
i
]))
{
auto
&
data
=
PADDLE_GET
(
phi
::
SparseCooTensor
,
self
[
i
]);
res
[
i
]
=
py
::
cast
(
std
::
move
(
data
));
}
else
{
}
else
{
auto
&
data
=
PADDLE_GET
(
LoDTensorArray
,
self
[
i
]);
auto
&
data
=
PADDLE_GET
(
LoDTensorArray
,
self
[
i
]);
py
::
list
tmp
(
data
.
size
());
py
::
list
tmp
(
data
.
size
());
...
...
paddle/fluid/pybind/tensor.cc
浏览文件 @
10225d22
...
@@ -1105,6 +1105,20 @@ void BindTensor(pybind11::module &m) { // NOLINT
...
@@ -1105,6 +1105,20 @@ void BindTensor(pybind11::module &m) { // NOLINT
std
::
copy
(
rows
.
begin
(),
rows
.
end
(),
std
::
back_inserter
(
new_rows
));
std
::
copy
(
rows
.
begin
(),
rows
.
end
(),
std
::
back_inserter
(
new_rows
));
return
new_rows
;
return
new_rows
;
});
});
// Minimal Python-side wrapper for phi::SparseCooTensor so sparse results can
// be held and inspected from Python (placement-new init, numel, indices).
py::class_<phi::SparseCooTensor>(m, "SparseCooTensor")
    .def("__init__",
         [](phi::SparseCooTensor &self) {
           new (&self) phi::SparseCooTensor();
         })
    .def("numel",
         [](const phi::SparseCooTensor &t) -> int64_t { return t.numel(); })
    .def("indices", [](const phi::SparseCooTensor &t) -> framework::Tensor {
      return t.indices();
    });
}
}
}
// namespace pybind
}
// namespace pybind
...
...
paddle/phi/api/lib/api_gen_utils.cc
浏览文件 @
10225d22
...
@@ -230,6 +230,9 @@ phi::SelectedRows* SetSelectedRowsKernelOutput(Tensor* out) {
...
@@ -230,6 +230,9 @@ phi::SelectedRows* SetSelectedRowsKernelOutput(Tensor* out) {
}
}
phi
::
TensorBase
*
SetSparseKernelOutput
(
Tensor
*
out
,
TensorType
type
)
{
phi
::
TensorBase
*
SetSparseKernelOutput
(
Tensor
*
out
,
TensorType
type
)
{
if
(
!
out
)
{
return
nullptr
;
}
if
(
!
out
->
initialized
())
{
if
(
!
out
->
initialized
())
{
if
(
type
==
TensorType
::
SPARSE_COO
)
{
if
(
type
==
TensorType
::
SPARSE_COO
)
{
auto
sparse_tensor
=
std
::
make_shared
<
phi
::
SparseCooTensor
>
(
auto
sparse_tensor
=
std
::
make_shared
<
phi
::
SparseCooTensor
>
(
...
...
paddle/phi/api/yaml/sparse_backward.yaml
浏览文件 @
10225d22
...
@@ -36,11 +36,12 @@
...
@@ -36,11 +36,12 @@
args
:
(Tensor x, Tensor y, Tensor out_grad)
args
:
(Tensor x, Tensor y, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
infer_meta
:
func
:
GeneralBinaryGradInferMeta
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
param
:
[
x
,
y
]
kernel
:
kernel
:
func
:
add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
func
:
add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr},
add_coo_dense_grad{sparse_coo, dense, sparse_coo -> sparse_coo, dense}
-
backward_op
:
addmm_grad
-
backward_op
:
addmm_grad
forward
:
addmm(Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) -> Tensor(out)
forward
:
addmm(Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) -> Tensor(out)
...
@@ -104,7 +105,7 @@
...
@@ -104,7 +105,7 @@
args
:
(Tensor x, Tensor out_grad, DataType value_dtype)
args
:
(Tensor x, Tensor out_grad, DataType value_dtype)
output
:
Tensor(x_grad)
output
:
Tensor(x_grad)
infer_meta
:
infer_meta
:
func
:
UnchangedInferMeta
func
:
UnchangedInferMeta
param
:
[
x
]
param
:
[
x
]
kernel
:
kernel
:
func
:
cast_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
func
:
cast_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
...
@@ -126,7 +127,7 @@
...
@@ -126,7 +127,7 @@
args
:
(Tensor x, Tensor y, Tensor out, Tensor out_grad)
args
:
(Tensor x, Tensor y, Tensor out, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
infer_meta
:
func
:
GeneralBinaryGradInferMeta
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
param
:
[
x
,
y
]
kernel
:
kernel
:
func
:
divide_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
func
:
divide_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
...
@@ -209,7 +210,7 @@
...
@@ -209,7 +210,7 @@
args
:
(Tensor x, Tensor y, Tensor out_grad)
args
:
(Tensor x, Tensor y, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
infer_meta
:
func
:
GeneralBinaryGradInferMeta
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
param
:
[
x
,
y
]
kernel
:
kernel
:
func
:
multiply_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
func
:
multiply_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
...
@@ -337,7 +338,7 @@
...
@@ -337,7 +338,7 @@
args
:
(Tensor x, Tensor y, Tensor out_grad)
args
:
(Tensor x, Tensor y, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
infer_meta
:
func
:
GeneralBinaryGradInferMeta
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
param
:
[
x
,
y
]
kernel
:
kernel
:
func
:
subtract_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
func
:
subtract_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
...
@@ -399,7 +400,7 @@
...
@@ -399,7 +400,7 @@
args
:
(Tensor query, Tensor key, Tensor value, Tensor softmax, Tensor out_grad)
args
:
(Tensor query, Tensor key, Tensor value, Tensor softmax, Tensor out_grad)
output
:
Tensor(query_grad), Tensor(key_grad), Tensor(value_grad)
output
:
Tensor(query_grad), Tensor(key_grad), Tensor(value_grad)
infer_meta
:
infer_meta
:
func
:
sparse::FusedAttentionGradInferMeta
func
:
sparse::FusedAttentionGradInferMeta
kernel
:
kernel
:
func
:
fused_attention_csr_grad{dense, dense, dense, sparse_csr, dense -> dense, dense, dense}
func
:
fused_attention_csr_grad{dense, dense, dense, sparse_csr, dense -> dense, dense, dense}
layout
:
softmax
layout
:
softmax
...
...
paddle/phi/api/yaml/sparse_ops.yaml
浏览文件 @
10225d22
...
@@ -35,10 +35,11 @@
...
@@ -35,10 +35,11 @@
args
:
(Tensor x, Tensor y)
args
:
(Tensor x, Tensor y)
output
:
Tensor(out)
output
:
Tensor(out)
infer_meta
:
infer_meta
:
func
:
ElementwiseInferMeta
func
:
ElementwiseInferMeta
kernel
:
kernel
:
func
:
add_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
func
:
add_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
add_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
add_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
add_coo_dense{sparse_coo, dense -> sparse_coo},
layout
:
x
layout
:
x
backward
:
add_grad
backward
:
add_grad
...
@@ -114,7 +115,7 @@
...
@@ -114,7 +115,7 @@
args
:
(Tensor x, Tensor y)
args
:
(Tensor x, Tensor y)
output
:
Tensor(out)
output
:
Tensor(out)
infer_meta
:
infer_meta
:
func
:
ElementwiseInferMeta
func
:
ElementwiseInferMeta
kernel
:
kernel
:
func
:
divide_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
func
:
divide_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
divide_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
divide_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
...
...
paddle/phi/core/compat/arg_map_context.h
浏览文件 @
10225d22
...
@@ -108,6 +108,7 @@ class ArgumentMappingContext {
...
@@ -108,6 +108,7 @@ class ArgumentMappingContext {
virtual
bool
IsDenseTensorInput
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsDenseTensorInput
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsDenseTensorInputs
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsDenseTensorInputs
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsSelectedRowsInput
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsSelectedRowsInput
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsSparseCooTensorInput
(
const
std
::
string
&
name
)
const
=
0
;
// For compatibility with LoDTensorArray
// For compatibility with LoDTensorArray
virtual
bool
IsDenseTensorVectorInput
(
const
std
::
string
&
name
)
const
=
0
;
virtual
bool
IsDenseTensorVectorInput
(
const
std
::
string
&
name
)
const
=
0
;
...
...
paddle/phi/core/meta_tensor.cc
浏览文件 @
10225d22
...
@@ -164,6 +164,9 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) {
...
@@ -164,6 +164,9 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) {
}
}
}
}
// True iff the wrapped TensorBase is a phi::DenseTensor.
bool MetaTensor::is_dense() const { return DenseTensor::classof(tensor_); }

// Always false for the base MetaTensor; compatibility hook kept for Fluid
// (see the header note — slated for removal).
bool MetaTensor::is_tensor_array() const { return false; }
void
MetaTensor
::
share_dims
(
const
MetaTensor
&
meta_tensor
)
{
void
MetaTensor
::
share_dims
(
const
MetaTensor
&
meta_tensor
)
{
ValidCheck
(
*
this
);
ValidCheck
(
*
this
);
bool
is_dense_tensor
=
phi
::
DenseTensor
::
classof
(
tensor_
);
bool
is_dense_tensor
=
phi
::
DenseTensor
::
classof
(
tensor_
);
...
...
paddle/phi/core/meta_tensor.h
浏览文件 @
10225d22
...
@@ -68,6 +68,12 @@ class MetaTensor {
...
@@ -68,6 +68,12 @@ class MetaTensor {
virtual
bool
initialized
()
const
;
virtual
bool
initialized
()
const
;
virtual
bool
is_dense
()
const
;
// TODO(YuanRisheng) This API is for compatible with
// Fluid
// and it will be deleted in the future.
virtual
bool
is_tensor_array
()
const
;
virtual
operator
unspecified_bool_type
()
const
{
virtual
operator
unspecified_bool_type
()
const
{
return
tensor_
==
nullptr
?
0
:
unspecified_bool_true
;
return
tensor_
==
nullptr
?
0
:
unspecified_bool_true
;
}
}
...
...
paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
浏览文件 @
10225d22
...
@@ -415,3 +415,14 @@ PD_REGISTER_KERNEL(divide_coo_coo_grad,
...
@@ -415,3 +415,14 @@ PD_REGISTER_KERNEL(divide_coo_coo_grad,
kernel
->
InputAt
(
2
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
2
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
3
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
3
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
}
}
// CPU registration for the sparse(coo)+dense add backward kernel; input 0 is
// tagged with the SPARSE_COO layout.
PD_REGISTER_KERNEL(add_coo_dense_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::ElementWiseAddDenseGradKernel,
                   float,
                   double,
                   int,
                   int64_t) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
浏览文件 @
10225d22
...
@@ -16,6 +16,7 @@ limitations under the License. */
...
@@ -16,6 +16,7 @@ limitations under the License. */
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/elementwise_kernel.h"
#include "paddle/phi/kernels/elementwise_kernel.h"
...
@@ -156,6 +157,21 @@ void ElementWiseCooKernelImpl(const Context& dev_ctx,
...
@@ -156,6 +157,21 @@ void ElementWiseCooKernelImpl(const Context& dev_ctx,
"shape = [%s], Y's shape = [%s]."
,
"shape = [%s], Y's shape = [%s]."
,
x
.
dims
(),
x
.
dims
(),
y
.
dims
()));
y
.
dims
()));
// temporary policy: for broadcast add
// TODO(zhangkaihuo): implement a correct function
const
bool
is_add
=
std
::
is_same
<
Functor
,
funcs
::
AddFunctor
<
T
>>::
value
;
if
(
is_add
&&
x
.
indices
().
numel
()
==
y
.
indices
().
numel
())
{
int
compare_indices
=
memcmp
(
x
.
indices
().
data
<
IntT
>
(),
y
.
indices
().
data
<
IntT
>
(),
sizeof
(
IntT
)
*
x
.
indices
().
numel
());
if
(
compare_indices
==
0
)
{
EmptyLikeCooKernel
<
T
,
Context
>
(
dev_ctx
,
x
,
out
);
phi
::
AddKernel
<
T
,
Context
>
(
dev_ctx
,
x
.
values
(),
y
.
values
(),
out
->
mutable_values
());
return
;
}
}
int64_t
element_size
=
1
;
int64_t
element_size
=
1
;
for
(
auto
j
=
1
;
j
<
x
.
values
().
dims
().
size
();
++
j
)
{
for
(
auto
j
=
1
;
j
<
x
.
values
().
dims
().
size
();
++
j
)
{
element_size
*=
x
.
values
().
dims
()[
j
];
element_size
*=
x
.
values
().
dims
()[
j
];
...
@@ -435,3 +451,14 @@ PD_REGISTER_KERNEL(divide_coo_coo,
...
@@ -435,3 +451,14 @@ PD_REGISTER_KERNEL(divide_coo_coo,
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
1
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
1
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
}
}
// CPU registration for the sparse(coo)+dense add forward kernel; input 0 is
// tagged with the SPARSE_COO layout.
PD_REGISTER_KERNEL(add_coo_dense,
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::ElementWiseAddDenseKernel,
                   float,
                   double,
                   int,
                   int64_t) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
paddle/phi/kernels/sparse/elementwise_grad_kernel.h
浏览文件 @
10225d22
...
@@ -14,6 +14,9 @@ limitations under the License. */
...
@@ -14,6 +14,9 @@ limitations under the License. */
#pragma once
#pragma once
#include "paddle/phi/kernels/elementwise_add_grad_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
...
@@ -119,5 +122,27 @@ std::vector<SparseCooTensor> ElementWiseDivideCooGrad(
...
@@ -119,5 +122,27 @@ std::vector<SparseCooTensor> ElementWiseDivideCooGrad(
return
std
::
vector
<
SparseCooTensor
>
{
dx
,
dy
};
return
std
::
vector
<
SparseCooTensor
>
{
dx
,
dy
};
}
}
template
<
typename
T
,
typename
Context
>
void
ElementWiseAddDenseGradKernel
(
const
Context
&
dev_ctx
,
const
SparseCooTensor
&
x
,
const
DenseTensor
&
y
,
const
SparseCooTensor
&
dout
,
SparseCooTensor
*
dx
,
DenseTensor
*
dy
)
{
DenseTensor
*
x_values_grad
=
nullptr
;
DenseTensor
*
y_grad
=
nullptr
;
if
(
dx
)
{
EmptyLikeCooKernel
<
T
,
Context
>
(
dev_ctx
,
x
,
dx
);
x_values_grad
=
dx
->
mutable_values
();
}
if
(
dy
)
{
*
dy
=
phi
::
EmptyLike
<
T
>
(
dev_ctx
,
y
);
y_grad
=
dy
;
}
phi
::
AddGradKernel
<
T
,
Context
>
(
dev_ctx
,
x
.
values
(),
y
,
dout
.
values
(),
-
1
,
x_values_grad
,
y_grad
);
}
}
// namespace sparse
}
// namespace sparse
}
// namespace phi
}
// namespace phi
paddle/phi/kernels/sparse/elementwise_kernel.h
浏览文件 @
10225d22
...
@@ -14,6 +14,10 @@ limitations under the License. */
...
@@ -14,6 +14,10 @@ limitations under the License. */
#pragma once
#pragma once
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/sparse/elementwise_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
...
@@ -78,5 +82,21 @@ DEFINE_ELEMENTWISE_KERNEL_FUNC(Subtract)
...
@@ -78,5 +82,21 @@ DEFINE_ELEMENTWISE_KERNEL_FUNC(Subtract)
DEFINE_ELEMENTWISE_KERNEL_FUNC
(
Multiply
)
DEFINE_ELEMENTWISE_KERNEL_FUNC
(
Multiply
)
DEFINE_ELEMENTWISE_KERNEL_FUNC
(
Divide
)
DEFINE_ELEMENTWISE_KERNEL_FUNC
(
Divide
)
// Sparse(coo) + dense add, currently restricted to the bias-like case where
// y is 1-D and matches x's last dimension; out keeps x's sparsity pattern.
template <typename T, typename Context>
void ElementWiseAddDenseKernel(const Context& dev_ctx,
                               const SparseCooTensor& x,
                               const DenseTensor& y,
                               SparseCooTensor* out) {
  // TODO(zhangkaiuo): to support universal sparse + dense
  const auto& x_dims = x.dims();
  const bool bias_like =
      y.dims().size() == 1 && y.dims()[0] == x_dims[x_dims.size() - 1];
  if (!bias_like) {
    // NOTE(review): this header backs the CPU registration too, so the
    // "GPU mode" wording may be misleading — confirm before rewording,
    // as the message is user-visible.
    PADDLE_THROW(
        errors::Unimplemented("Not support Sparse + Dense in GPU mode"));
  }
  EmptyLikeCooKernel<T, Context>(dev_ctx, x, out);
  phi::AddKernel<T, Context>(dev_ctx, x.values(), y, out->mutable_values());
  out->SetIndicesDict(x.GetIndicesDict());
}
}
// namespace sparse
}
// namespace sparse
}
// namespace phi
}
// namespace phi
paddle/phi/kernels/sparse/gpu/conv.cu.h
浏览文件 @
10225d22
...
@@ -15,6 +15,7 @@ limitations under the License. */
...
@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#pragma once
#include <thrust/remove.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/unique.h>
#include "paddle/phi/kernels/sparse/conv_kernel.h"
#include "paddle/phi/kernels/sparse/conv_kernel.h"
...
@@ -186,8 +187,7 @@ __global__ void UniqueKernel(const IntT* in_indexs,
...
@@ -186,8 +187,7 @@ __global__ void UniqueKernel(const IntT* in_indexs,
if
(
i
<
rulebook_len
)
{
if
(
i
<
rulebook_len
)
{
// atomicOr only support int
// atomicOr only support int
int
index
=
static_cast
<
int
>
(
in_indexs
[
i
]);
int
index
=
static_cast
<
int
>
(
in_indexs
[
i
]);
int
change_index
=
index
==
0
?
-
1
:
index
;
int
flag
=
atomicOr
(
out_index_table
+
index
,
1
);
int
flag
=
atomicOr
(
out_index_table
+
index
,
change_index
);
if
(
flag
==
0
)
{
if
(
flag
==
0
)
{
int
j
=
atomicAdd
(
&
count
,
1
);
int
j
=
atomicAdd
(
&
count
,
1
);
cache
[
j
]
=
index
;
cache
[
j
]
=
index
;
...
@@ -772,6 +772,7 @@ int ProductRuleBook(const Context& dev_ctx,
...
@@ -772,6 +772,7 @@ int ProductRuleBook(const Context& dev_ctx,
phi
::
backends
::
gpu
::
GpuMemsetAsync
(
phi
::
backends
::
gpu
::
GpuMemsetAsync
(
out_index_table_ptr
,
0
,
sizeof
(
int
)
*
table_size
,
dev_ctx
.
stream
());
out_index_table_ptr
,
0
,
sizeof
(
int
)
*
table_size
,
dev_ctx
.
stream
());
phi
::
backends
::
gpu
::
GpuMemsetAsync
(
phi
::
backends
::
gpu
::
GpuMemsetAsync
(
unique_key_ptr
,
0
,
sizeof
(
int
),
dev_ctx
.
stream
());
unique_key_ptr
,
0
,
sizeof
(
int
),
dev_ctx
.
stream
());
...
@@ -785,6 +786,7 @@ int ProductRuleBook(const Context& dev_ctx,
...
@@ -785,6 +786,7 @@ int ProductRuleBook(const Context& dev_ctx,
out_index_table_ptr
,
out_index_table_ptr
,
out_index_ptr
,
out_index_ptr
,
unique_key_ptr
);
unique_key_ptr
);
int
out_nnz
=
0
;
int
out_nnz
=
0
;
phi
::
backends
::
gpu
::
GpuMemcpyAsync
(
&
out_nnz
,
phi
::
backends
::
gpu
::
GpuMemcpyAsync
(
&
out_nnz
,
unique_key_ptr
,
unique_key_ptr
,
...
@@ -792,6 +794,13 @@ int ProductRuleBook(const Context& dev_ctx,
...
@@ -792,6 +794,13 @@ int ProductRuleBook(const Context& dev_ctx,
gpuMemcpyDeviceToHost
,
gpuMemcpyDeviceToHost
,
dev_ctx
.
stream
());
dev_ctx
.
stream
());
dev_ctx
.
Wait
();
dev_ctx
.
Wait
();
#ifdef PADDLE_WITH_HIP
thrust
::
sort
(
thrust
::
hip
::
par
.
on
(
dev_ctx
.
stream
()),
#else
thrust
::
sort
(
thrust
::
cuda
::
par
.
on
(
dev_ctx
.
stream
()),
#endif
out_index_ptr
,
out_index_ptr
+
out_nnz
);
const
int64_t
sparse_dim
=
4
;
const
int64_t
sparse_dim
=
4
;
phi
::
DenseTensor
out_indices
=
phi
::
DenseTensor
out_indices
=
...
...
paddle/phi/kernels/sparse/gpu/elementwise_grad_kernel.cu
浏览文件 @
10225d22
...
@@ -15,6 +15,9 @@ limitations under the License. */
...
@@ -15,6 +15,9 @@ limitations under the License. */
#include "paddle/phi/kernels/sparse/elementwise_grad_kernel.h"
#include "paddle/phi/kernels/sparse/elementwise_grad_kernel.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/elementwise_grad_base.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace
phi
{
namespace
phi
{
...
@@ -54,3 +57,15 @@ PD_REGISTER_KERNEL(add_coo_coo_grad,
...
@@ -54,3 +57,15 @@ PD_REGISTER_KERNEL(add_coo_coo_grad,
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
1
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
1
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
}
}
// GPU registration for the sparse(coo)+dense add backward kernel (adds
// float16 relative to the CPU list); input 0 is tagged SPARSE_COO.
PD_REGISTER_KERNEL(add_coo_dense_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::ElementWiseAddDenseGradKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
paddle/phi/kernels/sparse/gpu/elementwise_kernel.cu
浏览文件 @
10225d22
...
@@ -31,6 +31,7 @@ void ElementWiseAddCooGPUKernel(const GPUContext& dev_ctx,
...
@@ -31,6 +31,7 @@ void ElementWiseAddCooGPUKernel(const GPUContext& dev_ctx,
const
SparseCooTensor
&
x
,
const
SparseCooTensor
&
x
,
const
SparseCooTensor
&
y
,
const
SparseCooTensor
&
y
,
SparseCooTensor
*
out
)
{
SparseCooTensor
*
out
)
{
// TODO(zhangkaiuo): to support universal sparse + sparse
const
auto
&
x_indices
=
x
.
indices
();
const
auto
&
x_indices
=
x
.
indices
();
const
auto
&
y_indices
=
y
.
indices
();
const
auto
&
y_indices
=
y
.
indices
();
PADDLE_ENFORCE_EQ
(
PADDLE_ENFORCE_EQ
(
...
@@ -57,6 +58,7 @@ void ElementWiseAddCooGPUKernel(const GPUContext& dev_ctx,
...
@@ -57,6 +58,7 @@ void ElementWiseAddCooGPUKernel(const GPUContext& dev_ctx,
EmptyLikeCooKernel
<
T
,
GPUContext
>
(
dev_ctx
,
x
,
out
);
EmptyLikeCooKernel
<
T
,
GPUContext
>
(
dev_ctx
,
x
,
out
);
phi
::
AddKernel
<
T
,
GPUContext
>
(
phi
::
AddKernel
<
T
,
GPUContext
>
(
dev_ctx
,
x
.
values
(),
y
.
values
(),
out
->
mutable_values
());
dev_ctx
,
x
.
values
(),
y
.
values
(),
out
->
mutable_values
());
out
->
SetIndicesDict
(
x
.
GetIndicesDict
());
}
}
template
<
typename
T
,
typename
Context
>
template
<
typename
T
,
typename
Context
>
...
@@ -86,3 +88,15 @@ PD_REGISTER_KERNEL(add_coo_coo,
...
@@ -86,3 +88,15 @@ PD_REGISTER_KERNEL(add_coo_coo,
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
1
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
kernel
->
InputAt
(
1
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
}
}
PD_REGISTER_KERNEL
(
add_coo_dense
,
GPU
,
ALL_LAYOUT
,
phi
::
sparse
::
ElementWiseAddDenseKernel
,
float
,
double
,
int
,
int64_t
,
phi
::
dtype
::
float16
)
{
kernel
->
InputAt
(
0
).
SetDataLayout
(
phi
::
DataLayout
::
SPARSE_COO
);
}
paddle/phi/ops/compat/sparse_manual_op_sig.cc
0 → 100644
浏览文件 @
10225d22
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace
phi
{
// TODO(zhangkaihuo): add csr op
// Maps the static-graph `sparse_sparse_coo_tensor` op onto the
// "sparse_coo_tensor" phi kernel. The context is not consulted because this
// op has a single kernel variant: values/indices inputs, a `dense_shape`
// attribute, and one sparse output.
KernelSignature SparseSparseCooTensorOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("sparse_coo_tensor",
                         {"values", "indices"},
                         {"dense_shape"},
                         {"out"});
}
// Maps the static-graph `sparse_values` op onto the "values_coo" phi kernel
// when the input is a SparseCooTensor; other layouts fall back to the
// sentinel "unregistered" signature.
KernelSignature SparseValuesOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  if (!ctx.IsSparseCooTensorInput("x")) {
    return KernelSignature("unregistered", {}, {}, {});
  }
  return KernelSignature("values_coo", {"x"}, {}, {"out"});
}
// Maps the static-graph `sparse_indices` op onto the "indices_coo" phi
// kernel for COO inputs; any other layout yields the "unregistered"
// sentinel signature.
KernelSignature SparseIndicesOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  if (!ctx.IsSparseCooTensorInput("x")) {
    return KernelSignature("unregistered", {}, {}, {});
  }
  return KernelSignature("indices_coo", {"x"}, {}, {"out"});
}
// Maps the static-graph `sparse_to_dense` op onto the "coo_to_dense" phi
// kernel for COO inputs; any other layout yields the "unregistered"
// sentinel signature.
KernelSignature SparseToDenseOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  if (!ctx.IsSparseCooTensorInput("x")) {
    return KernelSignature("unregistered", {}, {}, {});
  }
  return KernelSignature("coo_to_dense", {"x"}, {}, {"out"});
}
// Maps the static-graph `sparse_relu` op onto the "relu_coo" phi kernel for
// COO inputs; any other layout yields the "unregistered" sentinel
// signature.
KernelSignature SparseReluOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  if (!ctx.IsSparseCooTensorInput("x")) {
    return KernelSignature("unregistered", {}, {}, {});
  }
  return KernelSignature("relu_coo", {"x"}, {}, {"out"});
}
// Maps the static-graph `sparse_conv3d` op onto the "conv3d_coo" phi
// kernel. Besides the convolution output, the kernel also emits the
// `rulebook` and `counter` auxiliary tensors used by the backward pass.
// Non-COO inputs fall back to the "unregistered" sentinel signature.
KernelSignature SparseConv3dOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  if (!ctx.IsSparseCooTensorInput("x")) {
    return KernelSignature("unregistered", {}, {}, {});
  }
  return KernelSignature(
      "conv3d_coo",
      {"x", "kernel"},
      {"paddings", "dilations", "strides", "groups", "subm", "key"},
      {"out", "rulebook", "counter"});
}
// Maps the static-graph `sparse_add` op onto a phi kernel chosen by the
// layouts of the two operands: coo+coo -> "add_coo_coo",
// coo+dense -> "add_coo_dense". Every other combination yields the
// "unregistered" sentinel signature.
KernelSignature SparseAddOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsSparseCooTensorInput("x")) {
    if (ctx.IsSparseCooTensorInput("y")) {
      return KernelSignature("add_coo_coo", {"x", "y"}, {}, {"out"});
    }
    if (ctx.IsDenseTensorInput("y")) {
      return KernelSignature("add_coo_dense", {"x", "y"}, {}, {"out"});
    }
  }
  return KernelSignature("unregistered", {}, {}, {});
}
}
// namespace phi
// Register, for each legacy static-graph sparse op, (a) the base phi kernel
// name it maps to and (b) the argument-mapping function defined above that
// builds its KernelSignature.
PD_REGISTER_BASE_KERNEL_NAME(sparse_sparse_coo_tensor, sparse_coo_tensor);
PD_REGISTER_ARG_MAPPING_FN(sparse_sparse_coo_tensor,
                           phi::SparseSparseCooTensorOpArgumentMapping);

PD_REGISTER_BASE_KERNEL_NAME(sparse_values, values_coo);
PD_REGISTER_ARG_MAPPING_FN(sparse_values, phi::SparseValuesOpArgumentMapping);

PD_REGISTER_BASE_KERNEL_NAME(sparse_indices, indices_coo);
PD_REGISTER_ARG_MAPPING_FN(sparse_indices, phi::SparseIndicesOpArgumentMapping);

PD_REGISTER_BASE_KERNEL_NAME(sparse_to_dense, coo_to_dense);
PD_REGISTER_ARG_MAPPING_FN(sparse_to_dense, phi::SparseToDenseOpArgumentMapping);

PD_REGISTER_BASE_KERNEL_NAME(sparse_relu, relu_coo);
PD_REGISTER_ARG_MAPPING_FN(sparse_relu, phi::SparseReluOpArgumentMapping);

PD_REGISTER_BASE_KERNEL_NAME(sparse_conv3d, conv3d_coo);
PD_REGISTER_ARG_MAPPING_FN(sparse_conv3d, phi::SparseConv3dOpArgumentMapping);

PD_REGISTER_BASE_KERNEL_NAME(sparse_add, add_coo_coo);
PD_REGISTER_ARG_MAPPING_FN(sparse_add, phi::SparseAddOpArgumentMapping);
paddle/phi/tests/ops/test_op_signature.h
浏览文件 @
10225d22
...
@@ -82,6 +82,10 @@ class TestArgumentMappingContext : public phi::ArgumentMappingContext {
...
@@ -82,6 +82,10 @@ class TestArgumentMappingContext : public phi::ArgumentMappingContext {
return
false
;
return
false
;
}
}
bool
IsSparseCooTensorInput
(
const
std
::
string
&
name
)
const
override
{
return
false
;
}
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
{
bool
IsDenseTensorOutput
(
const
std
::
string
&
name
)
const
override
{
return
dense_tensor_outputs
.
count
(
name
)
>
0
;
return
dense_tensor_outputs
.
count
(
name
)
>
0
;
}
}
...
...
python/paddle/fluid/framework.py
浏览文件 @
10225d22
...
@@ -1373,6 +1373,9 @@ class Variable(object):
...
@@ -1373,6 +1373,9 @@ class Variable(object):
type
=
core
.
VarDesc
.
VarType
.
STRINGS
type
=
core
.
VarDesc
.
VarType
.
STRINGS
lod_level
=
None
lod_level
=
None
if
type
==
core
.
VarDesc
.
VarType
.
SPARSE_COO
:
lod_level
=
None
self
.
belong_to_optimizer
=
belong_to_optimizer
self
.
belong_to_optimizer
=
belong_to_optimizer
self
.
error_clip
=
error_clip
self
.
error_clip
=
error_clip
...
...
python/paddle/fluid/layer_helper_base.py
浏览文件 @
10225d22
...
@@ -410,6 +410,30 @@ class LayerHelperBase(object):
...
@@ -410,6 +410,30 @@ class LayerHelperBase(object):
persistable
=
False
,
persistable
=
False
,
stop_gradient
=
stop_gradient
)
stop_gradient
=
stop_gradient
)
def create_sparse_variable_for_type_inference(self,
                                              dtype,
                                              stop_gradient=False,
                                              shape=None):
    """Create a temporary sparse variable that should be type inferred layer.

    Note:
        The default type will be set to SPARSE_COO. However, when
        the var is used as operator output, its type will be updated
        based on operator's `VarTypeInference` implementation in
        infer_var_type.
    """
    # Fall back to the helper's global dtype when none is given.
    inferred_dtype = dtype if dtype else self.__dtype
    tmp_name = unique_name.generate_with_ignorable_key(".".join(
        [self.name, 'tmp']))
    return self.main_program.current_block().create_var(
        name=tmp_name,
        dtype=inferred_dtype,
        shape=shape,
        type=core.VarDesc.VarType.SPARSE_COO,
        persistable=False,
        stop_gradient=stop_gradient)
def
create_variable
(
self
,
*
args
,
**
kwargs
):
def
create_variable
(
self
,
*
args
,
**
kwargs
):
"""Create Variable for this layers.
"""Create Variable for this layers.
Returns created Variable.
Returns created Variable.
...
...
python/paddle/fluid/layers/math_op_patch.py
浏览文件 @
10225d22
...
@@ -80,6 +80,10 @@ def monkey_patch_variable():
...
@@ -80,6 +80,10 @@ def monkey_patch_variable():
tmp_name
=
unique_tmp_name
()
tmp_name
=
unique_tmp_name
()
return
block
.
create_var
(
name
=
tmp_name
,
dtype
=
dtype
)
return
block
.
create_var
(
name
=
tmp_name
,
dtype
=
dtype
)
def create_new_tmp_sparse_var(block, dtype, type):
    """Create an unnamed temporary variable of the given variable ``type``
    (e.g. a sparse VarType) with dtype ``dtype`` in ``block``."""
    return block.create_var(name=unique_tmp_name(), dtype=dtype, type=type)
def
create_tensor
(
block
,
value
,
dtype
,
shape
):
def
create_tensor
(
block
,
value
,
dtype
,
shape
):
value
=
float
(
value
)
value
=
float
(
value
)
var
=
create_new_tmp_var
(
block
,
dtype
)
var
=
create_new_tmp_var
(
block
,
dtype
)
...
@@ -433,6 +437,33 @@ def monkey_patch_variable():
...
@@ -433,6 +437,33 @@ def monkey_patch_variable():
__impl__
.
__name__
=
method_name
__impl__
.
__name__
=
method_name
return
__impl__
return
__impl__
def values(var):
    """Static-graph patch: append a `sparse_values` op that extracts the
    values tensor of the sparse variable ``var`` and return the result."""
    cur_block = current_block(var)
    result = create_new_tmp_var(cur_block, var.dtype)
    cur_block.append_op(type="sparse_values",
                        inputs={"x": [var]},
                        outputs={"out": [result]},
                        attrs={})
    return result
def indices(var):
    """Static-graph patch: append a `sparse_indices` op that extracts the
    indices tensor of the sparse variable ``var`` and return the result."""
    cur_block = current_block(var)
    result = create_new_tmp_var(cur_block, var.dtype)
    cur_block.append_op(type="sparse_indices",
                        inputs={"x": [var]},
                        outputs={"out": [result]},
                        attrs={})
    return result
def to_dense(var):
    """Static-graph patch: append a `sparse_to_dense` op that converts the
    sparse variable ``var`` to a dense tensor and return the result."""
    cur_block = current_block(var)
    result = create_new_tmp_var(cur_block, var.dtype)
    cur_block.append_op(type="sparse_to_dense",
                        inputs={"x": [var]},
                        outputs={"out": [result]},
                        attrs={})
    return result
variable_methods
=
[
variable_methods
=
[
# b=-a
# b=-a
(
'__neg__'
,
_neg_
),
(
'__neg__'
,
_neg_
),
...
@@ -485,7 +516,10 @@ def monkey_patch_variable():
...
@@ -485,7 +516,10 @@ def monkey_patch_variable():
(
'__lt__'
,
_binary_creator_
(
'__lt__'
,
'less_than'
,
False
,
None
)),
(
'__lt__'
,
_binary_creator_
(
'__lt__'
,
'less_than'
,
False
,
None
)),
(
'__le__'
,
_binary_creator_
(
'__le__'
,
'less_equal'
,
False
,
None
)),
(
'__le__'
,
_binary_creator_
(
'__le__'
,
'less_equal'
,
False
,
None
)),
(
'__gt__'
,
_binary_creator_
(
'__gt__'
,
'greater_than'
,
False
,
None
)),
(
'__gt__'
,
_binary_creator_
(
'__gt__'
,
'greater_than'
,
False
,
None
)),
(
'__ge__'
,
_binary_creator_
(
'__ge__'
,
'greater_equal'
,
False
,
None
))
(
'__ge__'
,
_binary_creator_
(
'__ge__'
,
'greater_equal'
,
False
,
None
)),
(
'values'
,
values
),
(
'indices'
,
indices
),
(
'to_dense'
,
to_dense
),
]
]
global
_already_patch_variable
global
_already_patch_variable
...
...
python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
浏览文件 @
10225d22
...
@@ -19,6 +19,7 @@ import paddle
...
@@ -19,6 +19,7 @@ import paddle
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle.fluid
import
core
from
paddle.fluid
import
core
from
paddle.fluid.framework
import
_test_eager_guard
from
paddle.fluid.framework
import
_test_eager_guard
import
paddle.incubate.sparse
as
sparse
class
TestSparseConv
(
unittest
.
TestCase
):
class
TestSparseConv
(
unittest
.
TestCase
):
...
@@ -159,3 +160,66 @@ class TestSparseConv(unittest.TestCase):
...
@@ -159,3 +160,66 @@ class TestSparseConv(unittest.TestCase):
sp_conv3d
.
bias
.
grad
.
numpy
(),
sp_conv3d
.
bias
.
grad
.
numpy
(),
atol
=
1e-5
,
atol
=
1e-5
,
rtol
=
1e-5
)
rtol
=
1e-5
)
class TestStatic(unittest.TestCase):
    # End-to-end static-graph test of the sparse API: builds a SparseCooTensor
    # from static data feeds, runs sparse conv3d + relu, then fetches the
    # indices/values/dense views of the result.

    def test(self):
        paddle.enable_static()
        # 4 non-zero entries of a [1, 1, 3, 4, 1] NDHWC tensor; indices are
        # 4 x nnz (one row per sparse dimension), values are nnz x 1.
        indices = paddle.static.data(name='indices',
                                     shape=[4, 4],
                                     dtype='int32')
        values = paddle.static.data(name='values',
                                    shape=[4, 1],
                                    dtype='float32')
        dense_shape = [1, 1, 3, 4, 1]
        sp_x = sparse.sparse_coo_tensor(indices, values, dense_shape)

        # 1x3x3 kernel with one input and one output channel.
        weight_shape = [1, 3, 3, 1, 1]
        weight = paddle.static.data(name='weight',
                                    shape=weight_shape,
                                    dtype='float32')
        bias_shape = [1]
        bias = paddle.static.data(name='bias',
                                  shape=bias_shape,
                                  dtype='float32')
        out = sparse.nn.functional.conv3d(sp_x,
                                          weight,
                                          bias,
                                          stride=1,
                                          padding=0,
                                          dilation=1,
                                          groups=1,
                                          data_format="NDHWC")
        sp_out = sparse.nn.functional.relu(out)
        # Exercise the monkey-patched static-graph sparse accessors.
        out_indices = sp_out.indices()
        out_values = sp_out.values()
        out = sp_out.to_dense()

        exe = paddle.static.Executor()
        indices_data = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2],
                        [1, 3, 2, 3]]
        values_data = [[1.0], [2.0], [3.0], [4.0]]
        # All-ones 3x3 kernel, so each output value is the sum of the inputs
        # under the window (plus the bias of 1).
        weight_data = np.array([[[[[1], [1], [1]], [[1], [1], [1]],
                                  [[1], [1], [1]]]]]).astype('float32')
        weight_data = weight_data.reshape(weight_shape)
        bias_data = np.array([1]).astype('float32')

        fetch = exe.run(feed={
            'indices': indices_data,
            'values': values_data,
            'weight': weight_data,
            'bias': bias_data
        },
                        fetch_list=[out, out_indices, out_values],
                        return_numpy=True)
        correct_out = np.array([[[[[5.0], [11.0]]]]]).astype('float64')
        correct_out_values = [[5.0], [11.0]]
        assert np.array_equal(correct_out, fetch[0])
        assert np.array_equal(correct_out_values, fetch[2])
        # Output indices keep the int32 dtype of the fed indices.
        assert out_indices.dtype == paddle.int32
        paddle.disable_static()
if
__name__
==
"__main__"
:
unittest
.
main
()
python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py
浏览文件 @
10225d22
...
@@ -163,6 +163,32 @@ class TestSparseElementWiseAPI(unittest.TestCase):
...
@@ -163,6 +163,32 @@ class TestSparseElementWiseAPI(unittest.TestCase):
np
.
testing
.
assert_allclose
(
sp_b
.
grad
.
values
().
numpy
(),
np
.
testing
.
assert_allclose
(
sp_b
.
grad
.
values
().
numpy
(),
values2
.
grad
.
numpy
())
values2
.
grad
.
numpy
())
def test_add_bias(self):
    """sparse.add with a dense 1-D bias: the result's values must equal
    the sparse operand's values plus the bias, and gradients must match a
    purely dense reference computation."""
    # A [2, 4, 2] COO tensor with two non-zero entries, each holding a
    # length-2 dense value.
    indices_data = [[0, 1], [0, 3]]
    values_data = [[1.0, 1.0], [2.0, 2.0]]
    shape = [2, 4, 2]
    sp_a = sparse.sparse_coo_tensor(indices_data,
                                    values_data,
                                    shape,
                                    stop_gradient=False)

    bias_values = [1.0, 2.0]
    dense_vals = paddle.to_tensor(values_data, stop_gradient=False)
    bias = paddle.to_tensor(bias_values, stop_gradient=False)
    bias_ref = paddle.to_tensor(bias_values, stop_gradient=False)

    # c.values() == a.values() + bias
    sp_c = sparse.add(sp_a, bias)
    sp_c.backward()

    # Dense reference for both forward result and gradients.
    ref_c = dense_vals + bias_ref
    ref_c.backward()

    np.testing.assert_allclose(sp_c.values().numpy(), ref_c.numpy())
    np.testing.assert_allclose(sp_a.grad.values().numpy(),
                               dense_vals.grad.numpy())
    np.testing.assert_allclose(bias.grad.numpy(), bias_ref.grad.numpy())
if
__name__
==
"__main__"
:
if
__name__
==
"__main__"
:
paddle
.
device
.
set_device
(
'cpu'
)
paddle
.
device
.
set_device
(
'cpu'
)
...
...
python/paddle/incubate/sparse/binary.py
浏览文件 @
10225d22
...
@@ -14,6 +14,9 @@
...
@@ -14,6 +14,9 @@
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle.fluid.framework
import
dygraph_only
,
core
from
paddle.fluid.framework
import
dygraph_only
,
core
from
paddle
import
in_dynamic_mode
from
paddle.fluid.layer_helper
import
LayerHelper
from
.unary
import
cast
__all__
=
[]
__all__
=
[]
...
@@ -253,8 +256,20 @@ def add(x, y, name=None):
...
@@ -253,8 +256,20 @@ def add(x, y, name=None):
"""
"""
if
y
.
dtype
!=
x
.
dtype
:
if
y
.
dtype
!=
x
.
dtype
:
y
=
_C_ops
.
sparse_cast
(
y
,
None
,
x
.
dtype
)
y
=
cast
(
y
,
None
,
x
.
dtype
)
return
_C_ops
.
sparse_add
(
x
,
y
)
if
in_dynamic_mode
():
return
_C_ops
.
sparse_add
(
x
,
y
)
else
:
op_type
=
'sparse_add'
inputs
=
{
'x'
:
x
,
'y'
:
y
}
helper
=
LayerHelper
(
op_type
)
out
=
helper
.
create_sparse_variable_for_type_inference
(
x
.
dtype
)
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
outputs
=
{
'out'
:
out
},
attrs
=
{})
return
out
@
dygraph_only
@
dygraph_only
...
...
python/paddle/incubate/sparse/creation.py
浏览文件 @
10225d22
...
@@ -18,6 +18,8 @@ from paddle.fluid.framework import core, dygraph_only
...
@@ -18,6 +18,8 @@ from paddle.fluid.framework import core, dygraph_only
from
paddle.fluid.framework
import
_current_expected_place
,
_get_paddle_place
from
paddle.fluid.framework
import
_current_expected_place
,
_get_paddle_place
from
paddle.tensor
import
to_tensor
,
max
from
paddle.tensor
import
to_tensor
,
max
from
paddle.fluid.data_feeder
import
check_variable_and_dtype
,
check_type
,
check_dtype
,
convert_dtype
from
paddle.fluid.data_feeder
import
check_variable_and_dtype
,
check_type
,
check_dtype
,
convert_dtype
from
paddle
import
in_dynamic_mode
from
paddle.fluid.layer_helper
import
LayerHelper
import
numpy
as
np
import
numpy
as
np
...
@@ -64,7 +66,6 @@ def _check_indices_dtype(dtype):
...
@@ -64,7 +66,6 @@ def _check_indices_dtype(dtype):
)
)
@
dygraph_only
def
sparse_coo_tensor
(
indices
,
def
sparse_coo_tensor
(
indices
,
values
,
values
,
shape
=
None
,
shape
=
None
,
...
@@ -120,53 +121,68 @@ def sparse_coo_tensor(indices,
...
@@ -120,53 +121,68 @@ def sparse_coo_tensor(indices,
# values=[1., 2., 3.])
# values=[1., 2., 3.])
"""
"""
place
=
_get_place
(
place
)
if
in_dynamic_mode
():
place
=
_get_place
(
place
)
if
not
isinstance
(
indices
,
core
.
eager
.
Tensor
):
if
not
isinstance
(
indices
,
core
.
eager
.
Tensor
):
indices
=
to_tensor
(
indices
,
indices
=
to_tensor
(
indices
,
dtype
=
None
,
dtype
=
None
,
place
=
place
,
place
=
place
,
stop_gradient
=
True
)
stop_gradient
=
True
)
if
not
isinstance
(
values
,
core
.
eager
.
Tensor
):
if
not
isinstance
(
values
,
core
.
eager
.
Tensor
):
values
=
to_tensor
(
values
,
dtype
,
place
,
stop_gradient
)
values
=
to_tensor
(
values
,
dtype
,
place
,
stop_gradient
)
if
len
(
indices
.
shape
)
!=
2
:
if
len
(
indices
.
shape
)
!=
2
:
raise
ValueError
(
"'indices' must be 2-D."
)
raise
ValueError
(
"'indices' must be 2-D."
)
nnz
=
indices
.
shape
[
1
]
nnz
=
indices
.
shape
[
1
]
sparse_dim
=
indices
.
shape
[
0
]
sparse_dim
=
indices
.
shape
[
0
]
_check_indices_dtype
(
indices
.
dtype
)
_check_indices_dtype
(
indices
.
dtype
)
if
nnz
!=
values
.
shape
[
0
]:
if
nnz
!=
values
.
shape
[
0
]:
raise
ValueError
(
raise
ValueError
(
"the indices and values must have same number of non-zero, but get {} and {}"
"the indices and values must have same number of non-zero, but get {} and {}"
.
format
(
nnz
,
values
.
shape
[
0
]))
.
format
(
nnz
,
values
.
shape
[
0
]))
dense_dim
=
len
(
values
.
shape
)
-
1
dense_dim
=
len
(
values
.
shape
)
-
1
if
not
indices
.
place
.
_equals
(
place
):
if
not
indices
.
place
.
_equals
(
place
):
indices
=
indices
.
_copy_to
(
place
,
False
)
indices
=
indices
.
_copy_to
(
place
,
False
)
if
not
values
.
place
.
_equals
(
place
):
if
not
values
.
place
.
_equals
(
place
):
values
=
values
.
_copy_to
(
place
,
False
)
values
=
values
.
_copy_to
(
place
,
False
)
values
=
_handle_dtype
(
values
,
dtype
)
values
=
_handle_dtype
(
values
,
dtype
)
values
.
stop_gradient
=
stop_gradient
values
.
stop_gradient
=
stop_gradient
min_shape
=
_infer_dense_shape
(
indices
,
values
)
min_shape
=
_infer_dense_shape
(
indices
,
values
)
if
shape
is
None
:
if
shape
is
None
:
shape
=
min_shape
shape
=
min_shape
else
:
else
:
if
shape
<
min_shape
:
if
shape
<
min_shape
:
raise
ValueError
(
raise
ValueError
(
"the minimun shape required is {}, but get {}"
.
format
(
"the minimun shape required is {}, but get {}"
.
format
(
min_shape
,
shape
))
min_shape
,
shape
))
if
len
(
shape
)
!=
sparse_dim
+
dense_dim
:
if
len
(
shape
)
!=
sparse_dim
+
dense_dim
:
raise
ValueError
(
raise
ValueError
(
"the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}"
"the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}"
.
format
(
sparse_dim
,
dense_dim
,
len
(
shape
)))
.
format
(
sparse_dim
,
dense_dim
,
len
(
shape
)))
return
_C_ops
.
sparse_sparse_coo_tensor
(
values
,
indices
,
shape
)
return
_C_ops
.
sparse_sparse_coo_tensor
(
values
,
indices
,
shape
)
else
:
op_type
=
'sparse_sparse_coo_tensor'
inputs
=
{
'values'
:
values
,
'indices'
:
indices
}
if
shape
[
0
]
is
None
:
shape
[
0
]
=
-
1
attrs
=
{
'dense_shape'
:
shape
}
helper
=
LayerHelper
(
op_type
)
out
=
helper
.
create_sparse_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
outputs
=
{
'out'
:
out
},
attrs
=
attrs
)
return
out
#TODO: need to support shape is None
#TODO: need to support shape is None
...
...
python/paddle/incubate/sparse/nn/functional/activation.py
浏览文件 @
10225d22
...
@@ -16,9 +16,10 @@ __all__ = []
...
@@ -16,9 +16,10 @@ __all__ = []
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle.fluid.framework
import
dygraph_only
from
paddle.fluid.framework
import
dygraph_only
from
paddle
import
in_dynamic_mode
from
paddle.fluid.layer_helper
import
LayerHelper
@
dygraph_only
def
relu
(
x
,
name
=
None
):
def
relu
(
x
,
name
=
None
):
"""
"""
sparse relu activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
sparse relu activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
...
@@ -45,7 +46,17 @@ def relu(x, name=None):
...
@@ -45,7 +46,17 @@ def relu(x, name=None):
out = paddle.incubate.sparse.nn.functional.relu(sparse_x)
out = paddle.incubate.sparse.nn.functional.relu(sparse_x)
# [0., 0., 1.]
# [0., 0., 1.]
"""
"""
return
_C_ops
.
sparse_relu
(
x
)
if
in_dynamic_mode
():
return
_C_ops
.
sparse_relu
(
x
)
else
:
op_type
=
'sparse_relu'
helper
=
LayerHelper
(
op_type
)
out
=
helper
.
create_sparse_variable_for_type_inference
(
x
.
dtype
)
helper
.
append_op
(
type
=
op_type
,
inputs
=
{
'x'
:
x
},
outputs
=
{
'out'
:
out
},
attrs
=
{})
return
out
@
dygraph_only
@
dygraph_only
...
...
python/paddle/incubate/sparse/nn/functional/conv.py
浏览文件 @
10225d22
...
@@ -18,7 +18,9 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
...
@@ -18,7 +18,9 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from
paddle.fluid.layers.utils
import
convert_to_list
from
paddle.fluid.layers.utils
import
convert_to_list
from
paddle.fluid.layers.nn
import
elementwise_add
from
paddle.fluid.layers.nn
import
elementwise_add
from
...creation
import
sparse_coo_tensor
from
...creation
import
sparse_coo_tensor
from
...binary
import
add
from
paddle.nn.functional.conv
import
_update_padding_nd
from
paddle.nn.functional.conv
import
_update_padding_nd
from
paddle.fluid.layer_helper
import
LayerHelper
def
_conv3d
(
x
,
def
_conv3d
(
x
,
...
@@ -32,7 +34,6 @@ def _conv3d(x,
...
@@ -32,7 +34,6 @@ def _conv3d(x,
key
=
None
,
key
=
None
,
data_format
=
"NDHWC"
,
data_format
=
"NDHWC"
,
name
=
None
):
name
=
None
):
assert
in_dynamic_mode
(),
"Currently, only support dynamic mode"
assert
groups
==
1
,
"Currently, only support groups=1"
assert
groups
==
1
,
"Currently, only support groups=1"
dims
=
3
dims
=
3
...
@@ -61,20 +62,41 @@ def _conv3d(x,
...
@@ -61,20 +62,41 @@ def _conv3d(x,
padding
,
padding_algorithm
=
_update_padding_nd
(
padding
,
channel_last
,
dims
)
padding
,
padding_algorithm
=
_update_padding_nd
(
padding
,
channel_last
,
dims
)
stride
=
convert_to_list
(
stride
,
dims
,
'stride'
)
stride
=
convert_to_list
(
stride
,
dims
,
'stride'
)
dilation
=
convert_to_list
(
dilation
,
dims
,
'dilation'
)
dilation
=
convert_to_list
(
dilation
,
dims
,
'dilation'
)
op_type
=
"conv3d"
if
in_dynamic_mode
():
pre_bias
=
_C_ops
.
sparse_conv3d
(
x
,
weight
,
padding
,
dilation
,
stride
,
pre_bias
=
_C_ops
.
sparse_conv3d
(
x
,
weight
,
padding
,
dilation
,
stride
,
groups
,
subm
,
groups
,
subm
,
key
if
key
is
not
None
else
""
)
key
if
key
is
not
None
else
""
)
if
bias
is
not
None
:
if
bias
is
not
None
:
values
=
pre_bias
.
values
()
return
add
(
pre_bias
,
bias
)
add_bias
=
elementwise_add
(
values
,
bias
,
axis
=
1
)
else
:
return
sparse_coo_tensor
(
pre_bias
.
indices
(),
return
pre_bias
add_bias
,
shape
=
pre_bias
.
shape
,
stop_gradient
=
pre_bias
.
stop_gradient
)
else
:
else
:
return
pre_bias
inputs
=
{
'x'
:
x
,
'kernel'
:
weight
}
attrs
=
{
'paddings'
:
padding
,
'dilations'
:
dilation
,
'strides'
:
stride
,
'groups'
:
groups
,
'subm'
:
subm
,
'key'
:
key
}
op_type
=
'sparse_conv3d'
helper
=
LayerHelper
(
op_type
,
**
locals
())
rulebook
=
helper
.
create_variable_for_type_inference
(
dtype
=
'int32'
,
stop_gradient
=
True
)
counter
=
helper
.
create_variable_for_type_inference
(
dtype
=
'int32'
,
stop_gradient
=
True
)
pre_bias
=
helper
.
create_sparse_variable_for_type_inference
(
x
.
dtype
)
outputs
=
{
"out"
:
pre_bias
,
"rulebook"
:
rulebook
,
"counter"
:
counter
}
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
outputs
=
outputs
,
attrs
=
attrs
)
if
bias
is
not
None
:
return
add
(
pre_bias
,
bias
)
else
:
return
pre_bias
def
conv3d
(
x
,
def
conv3d
(
x
,
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录