diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 2729c090659329ada2f091c484fa4527c4ec1e20..2507919e6fc7fe16a2a62820cdf37a633a89cbc4 100755
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -237,7 +237,7 @@ cc_test(
 cc_library(
   var_type_traits
   SRCS var_type_traits.cc
-  DEPS framework_proto scope tensor_array)
+  DEPS framework_proto scope tensor_array sparse_coo_tensor)
 if(WITH_GPU)
   target_link_libraries(var_type_traits dynload_cuda)
 endif()
@@ -1185,7 +1185,8 @@ cc_library(
     phi
     phi_api_utils
     op_info
-    shape_inference)
+    shape_inference
+    sparse_coo_tensor)
 cc_test(
   infershape_utils_test
   SRCS infershape_utils_test.cc
diff --git a/paddle/fluid/framework/feed_fetch_type.h b/paddle/fluid/framework/feed_fetch_type.h
index 3fe545ec9c5699a3783f8795a713b5f1a89cd8fc..a31435028dafb4d413f31ce545836b2b4af4ab98 100644
--- a/paddle/fluid/framework/feed_fetch_type.h
+++ b/paddle/fluid/framework/feed_fetch_type.h
@@ -22,10 +22,11 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
-using FeedType = paddle::variant<LoDTensor, Strings>;
+using FeedType = paddle::variant<LoDTensor, Strings, phi::SparseCooTensor>;
 using FeedList = std::vector<FeedType>;

-using FetchType = paddle::variant<LoDTensor, LoDTensorArray, framework::Vocab>;
+using FetchType = paddle::
+    variant<LoDTensor, LoDTensorArray, framework::Vocab, phi::SparseCooTensor>;
 using FetchList = std::vector<FetchType>;

 using FetchUnmergedList = std::vector<std::vector<FetchType>>;
@@ -52,6 +53,13 @@ inline bool data_is_string_tensor(const FeedType &data) {
   return false;
 }

+inline bool data_is_sparse_coo_tensor(const FetchType &data) {
+  if (data.type() == typeid(phi::SparseCooTensor)) {
+    return true;
+  }
+  return false;
+}
+
 static const char kFeedOpType[] = "feed";
 static const char kFetchOpType[] = "fetch";
diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto
index 2a56dc60335d904d42cb1ba099333a5572aec2e1..3dbb6693e8d838af7936d448da50e8286df14b27 100644
--- a/paddle/fluid/framework/framework.proto
+++ b/paddle/fluid/framework/framework.proto
@@ -154,6 +154,8 @@ message VarType {
     FEED_LIST = 28;
     // The data type of phi::StringTensor
     PSTRING = 29;
+    // the data type of phi::SparseCooTensor
+    SPARSE_COO = 30;
   }

   required Type type = 1;
@@ -186,6 +188,7 @@ message VarType {
     optional TensorDesc string = 8;
     optional TensorDesc strings = 9;
     optional TensorDesc vocab = 10;
+    optional TensorDesc sparse_coo = 11;
   }

 message VarDesc {
diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc
index 22d9eb43c59ebf715edcec88b0bfbbc5c075dfd3..3535c5ffb82433426a842e0d7a073486d0f54bdd 100644
--- a/paddle/fluid/framework/infershape_utils.cc
+++ b/paddle/fluid/framework/infershape_utils.cc
@@ -110,6 +110,11 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext {
     });
   }

+  bool IsSparseCooTensorInput(const std::string& name) const override {
+    auto var_type = ctx_.GetInputVarType(name);
+    return var_type == proto::VarType::SPARSE_COO;
+  }
+
   bool IsDenseTensorOutput(const std::string& name) const override {
     auto var_types = ctx_.GetOutputsVarType(name);
     return std::all_of(var_types.begin(),
@@ -192,6 +197,8 @@ DDim CompatMetaTensor::dims() const {
     return var->Get<phi::DenseTensor>().dims();
   } else if (var->IsType<phi::SelectedRows>()) {
     return var->Get<phi::SelectedRows>().GetCompleteDims();
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    return var->Get<phi::SparseCooTensor>().dims();
   } else if (var->IsType<framework::LoDTensorArray>()) {
     // use tensor array size as dims
     auto& tensor_array = var->Get<framework::LoDTensorArray>();
@@ -217,6 +224,8 @@ phi::DataType CompatMetaTensor::dtype() const {
     return var->Get<phi::DenseTensor>().dtype();
   } else if (var->IsType<phi::SelectedRows>()) {
     return var->Get<phi::SelectedRows>().dtype();
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    return var->Get<phi::SparseCooTensor>().dtype();
   } else if (var->IsType<framework::LoDTensorArray>()) {
     // NOTE(chenweihang): do nothing
     // Unsupported get dtype from LoDTensorArray now
@@ -239,6 +248,8 @@ DataLayout CompatMetaTensor::layout() const {
     return var->Get<phi::DenseTensor>().layout();
   } else if (var->IsType<phi::SelectedRows>()) {
     return var->Get<phi::SelectedRows>().layout();
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    return var->Get<phi::SparseCooTensor>().layout();
   } else if (var->IsType<framework::LoDTensorArray>()) {
     // NOTE(chenweihang): do nothing
     // Unsupported get layout from LoDTensorArray now
@@ -264,6 +275,9 @@ void CompatMetaTensor::set_dims(const DDim& dims) {
     phi::DenseTensorUtils::GetMutableMeta(tensor)->dims = dims;
   } else if (var->IsType<phi::SelectedRows>()) {
     var->GetMutable<phi::SelectedRows>()->set_height(dims[0]);
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    auto* tensor = var->GetMutable<phi::SparseCooTensor>();
+    phi::DenseTensorUtils::GetMutableMeta(tensor)->dims = dims;
   } else if (var->IsType<framework::LoDTensorArray>()) {
     auto* tensor_array = var->GetMutable<framework::LoDTensorArray>();
     // Note: Here I want enforce `tensor_array->size() == 0UL`, because
@@ -295,6 +309,9 @@ void CompatMetaTensor::set_dtype(phi::DataType dtype) {
   } else if (var->IsType<phi::SelectedRows>()) {
     auto* tensor = var->GetMutable<phi::SelectedRows>()->mutable_value();
     phi::DenseTensorUtils::GetMutableMeta(tensor)->dtype = dtype;
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    auto* tensor = var->GetMutable<phi::SparseCooTensor>();
+    phi::DenseTensorUtils::GetMutableMeta(tensor)->dtype = dtype;
   } else if (var->IsType<framework::LoDTensorArray>()) {
     // NOTE(chenweihang): do nothing
     // Unsupported set dtype for LoDTensorArray now
@@ -318,6 +335,9 @@ void CompatMetaTensor::set_layout(DataLayout layout) {
   } else if (var->IsType<phi::SelectedRows>()) {
     auto* tensor = var->GetMutable<phi::SelectedRows>()->mutable_value();
     phi::DenseTensorUtils::GetMutableMeta(tensor)->layout = layout;
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    auto* tensor = var->GetMutable<phi::SparseCooTensor>();
+    phi::DenseTensorUtils::GetMutableMeta(tensor)->layout = layout;
   } else if (var->IsType<framework::LoDTensorArray>()) {
     // NOTE(chenweihang): do nothing
     // Unsupported set dtype for LoDTensorArray now
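The CompatMetaTensor branches above are what let the existing InferMeta machinery read and write a SPARSE_COO variable's dims, dtype, and layout at graph-build time. A minimal sketch of the effect from the Python side, assuming the static-graph sparse API added later in this diff:

    import paddle
    import paddle.incubate.sparse as sparse

    paddle.enable_static()
    indices = paddle.static.data(name='indices', shape=[1, 2], dtype='int32')
    values = paddle.static.data(name='values', shape=[2], dtype='float32')
    sp_x = sparse.sparse_coo_tensor(indices, values, [3])
    # dims were propagated through CompatMetaTensor::set_dims at build time,
    # so the sparse variable already reports its dense shape here
    print(sp_x.shape)  # expected to report [3]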
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 4c28a9b59535eaab47b20af778d4d52309bde80b..613cd4989276d254570cf6f34eb6f343e9696583 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -2382,6 +2382,17 @@ void OperatorWithKernel::ParseInputDataType(
     t = &var->Get<LoDTensor>();
   } else if (var->IsType<phi::SelectedRows>()) {
     t = &(var->Get<phi::SelectedRows>().value());
+  } else if (var->IsType<phi::SparseCooTensor>()) {
+    const phi::SparseCooTensor* sp_t = &(var->Get<phi::SparseCooTensor>());
+    PADDLE_ENFORCE_EQ(
+        sp_t->initialized(),
+        true,
+        platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
+                                          "contains uninitialized Tensor.",
+                                          Type(),
+                                          name));
+    *data_type = paddle::framework::TransToProtoVarType(sp_t->dtype());
+    return;
   } else if (var->IsType<framework::LoDTensorArray>()) {
     auto t_arr = &var->Get<framework::LoDTensorArray>();
     for (size_t j = 0; j < t_arr->size(); j++) {
@@ -2419,6 +2430,29 @@ void OperatorWithKernel::ParseMultiInputDataType(
       t = &var->Get<LoDTensor>();
     } else if (var->IsType<phi::SelectedRows>()) {
       t = &(var->Get<phi::SelectedRows>().value());
+    } else if (var->IsType<phi::SparseCooTensor>()) {
+      const phi::SparseCooTensor* sp_t = &(var->Get<phi::SparseCooTensor>());
+      PADDLE_ENFORCE_EQ(
+          sp_t->initialized(),
+          true,
+          platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
+                                            "contains uninitialized Tensor.",
+                                            Type(),
+                                            name));
+      proto::VarType::Type tmp =
+          paddle::framework::TransToProtoVarType(sp_t->dtype());
+      PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
+                     platform::errors::InvalidArgument(
+                         "The DataType of %s Op's duplicable or different "
+                         "slot Variable %s must be "
+                         "consistent or register GetExpectedKernelType. The "
+                         "current variable type is (%s), but the "
+                         "previous variable type is (%s).",
+                         Type(),
+                         name,
+                         DataTypeToString(tmp),
+                         DataTypeToString(*data_type)));
+      *data_type = tmp;
     } else if (var->IsType<framework::LoDTensorArray>()) {
       auto t_arr = &var->Get<framework::LoDTensorArray>();
       for (size_t j = 0; j < t_arr->size(); j++) {
@@ -2663,6 +2697,9 @@ void OperatorWithKernel::BuildPhiKernelContext(
         } else if (var->IsType<phi::SelectedRows>()) {
           tensor_in = &(var->Get<phi::SelectedRows>());
           phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
+        } else if (var->IsType<phi::SparseCooTensor>()) {
+          tensor_in = &(var->Get<phi::SparseCooTensor>());
+          phi_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
         } else if (var->IsType<framework::LoDTensorArray>()) {
           need_prepare_phi_data_ = true;
           tensor_in = &(var->Get<framework::LoDTensorArray>());
@@ -2708,6 +2745,9 @@ void OperatorWithKernel::BuildPhiKernelContext(
           } else if (var->template IsType<phi::SelectedRows>()) {
             tensor_out = var->template GetMutable<phi::SelectedRows>();
             phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
+          } else if (var->template IsType<phi::SparseCooTensor>()) {
+            tensor_out = var->template GetMutable<phi::SparseCooTensor>();
+            phi_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
           } else if (var->template IsType<framework::LoDTensorArray>()) {
             tensor_out = var->template GetMutable<framework::LoDTensorArray>();
             // Note: If the input LoDTensorArray size is 0, the output
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index edb2d539f82ef732b55386cb9eb53f0683769861..6a7d7ae80d3a5523f106d7702a8e5a3b4aca01ce 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -531,6 +531,11 @@ class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext {
     });
   }

+  bool IsSparseCooTensorInput(const std::string& name) const override {
+    const auto* var = ctx_.InputVar(name);
+    return var->IsType<phi::SparseCooTensor>();
+  }
+
   bool IsDenseTensorOutput(const std::string& name) const override {
     auto vars = ctx_.MultiOutputVar(name);
     return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index fcb061aa93288fa54a73a26e9879e6343c92acc7..81ea7d8f0e7467d9325f01703d3cc9fed2fb9b99 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -17,6 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/mixed_vector.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/sparse_coo_tensor.h"

 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc
index 6f9574b3f2c46c4cb06dd7743b005d966861d754..d4a53c4135a08b98113d1965d681acb469ac8ec0 100644
--- a/paddle/fluid/framework/var_desc.cc
+++ b/paddle/fluid/framework/var_desc.cc
@@ -237,6 +237,8 @@ const proto::VarType::TensorDesc &VarDesc::tensor_desc() const {
       return desc_.type().strings();
     case proto::VarType::VOCAB:
       return desc_.type().vocab();
+    case proto::VarType::SPARSE_COO:
+      return desc_.type().sparse_coo();
     default:
       PADDLE_THROW(platform::errors::Unavailable(
           "Getting 'tensor_desc' is not supported by the %s type variable.",
@@ -284,6 +286,8 @@ proto::VarType::TensorDesc *VarDesc::mutable_tensor_desc() {
       return desc_.mutable_type()->mutable_strings();
     case proto::VarType::VOCAB:
       return desc_.mutable_type()->mutable_vocab();
+    case proto::VarType::SPARSE_COO:
+      return desc_.mutable_type()->mutable_sparse_coo();
     default:
       PADDLE_THROW(
           platform::errors::Unavailable("Getting 'mutable_tensor_desc' is not "
diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h
index d0d26d599233665b9835f80d12048973be54c84d..bab027868c42f743cd01011bae128c86e7225f19 100644
--- a/paddle/fluid/framework/var_type.h
+++ b/paddle/fluid/framework/var_type.h
@@ -33,6 +33,7 @@ inline proto::VarType::Type ToVarType(int type) {
   switch (type) {
     case proto::VarType::LOD_TENSOR:
     case proto::VarType::SELECTED_ROWS:
+    case proto::VarType::SPARSE_COO:
     case proto::VarType::LOD_RANK_TABLE:
     case proto::VarType::LOD_TENSOR_ARRAY:
     case proto::VarType::FETCH_LIST:
@@ -59,6 +60,9 @@ inline void VisitVarType(const framework::Variable& var, Visitor visitor) {
     case proto::VarType::SELECTED_ROWS:
       visitor(var.Get<phi::SelectedRows>());
       return;
+    case proto::VarType::SPARSE_COO:
+      visitor(var.Get<phi::SparseCooTensor>());
+      return;
     case proto::VarType::READER:
       visitor(var.Get<ReaderHolder>());
       return;
diff --git a/paddle/fluid/framework/var_type_traits.h b/paddle/fluid/framework/var_type_traits.h
index ea7ebce2dae6bebf4391fbe6a697c61e6cf42601..d2a4788a5038127d7c790c4f801a1c3d7dddabb7 100644
--- a/paddle/fluid/framework/var_type_traits.h
+++ b/paddle/fluid/framework/var_type_traits.h
@@ -54,6 +54,7 @@ namespace phi {
 class DenseTensor;
 class SelectedRows;
+class SparseCooTensor;
 }  // namespace phi

 // Users should add forward declarations here
@@ -180,6 +181,7 @@ struct VarTypeRegistryImpl {
 using VarTypeRegistry = detail::VarTypeRegistryImpl<
     Tensor,
     phi::SelectedRows,
+    phi::SparseCooTensor,
     std::vector<Scope*>,
     LoDRankTable,
     Strings,
@@ -252,6 +254,7 @@ REG_PROTO_VAR_TYPE_TRAIT(float, proto::VarType::FP32);
 REG_PROTO_VAR_TYPE_TRAIT(Vocab, proto::VarType::VOCAB);
 REG_PROTO_VAR_TYPE_TRAIT(String, proto::VarType::STRING);
 REG_PROTO_VAR_TYPE_TRAIT(Strings, proto::VarType::STRINGS);
+REG_PROTO_VAR_TYPE_TRAIT(phi::SparseCooTensor, proto::VarType::SPARSE_COO);

 /** End of variable type registration */
diff --git a/paddle/fluid/framework/variable_helper.cc b/paddle/fluid/framework/variable_helper.cc
index 471efc020783576d1fa82ccb066070c0878fd8a1..90dac6191bd989ca06d7be50d4157d5ba98bff86 100644
--- a/paddle/fluid/framework/variable_helper.cc
+++ b/paddle/fluid/framework/variable_helper.cc
@@ -52,6 +52,8 @@ void InitializeVariable(Variable *var, proto::VarType::Type var_type) {
     var->GetMutable<ReaderHolder>();
   } else if (var_type == proto::VarType::RAW) {
     // GetMutable will be called in operator
+  } else if (var_type == proto::VarType::SPARSE_COO) {
+    var->GetMutable<phi::SparseCooTensor>();
   } else {
     PADDLE_THROW(platform::errors::Unavailable(
         "Variable type %d is not in "
diff --git a/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc b/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
index 8c4dd20ee06e38254dc5a080ede4081967c4507c..9b5ab945d77fa7fac590815d3c0516248be18344 100644
--- a/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
+++ b/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
@@ -108,6 +108,10 @@ bool PluginArgumentMappingContext::IsSelectedRowsInputs(
     const std::string& name) const {
   return false;
 }
+bool PluginArgumentMappingContext::IsSparseCooTensorInput(
+    const std::string& name) const {
+  return false;
+}
 bool PluginArgumentMappingContext::IsDenseTensorVectorInput(
     const std::string& name) const {
   return false;
diff --git a/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h b/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
index 1a483626dcea3e90fae7e6b4f16b5e39288e698f..f004040af3b1a11ad180b9cfbbb8655b88e31c11 100644
--- a/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
+++ b/paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
@@ -48,6 +48,8 @@ class PluginArgumentMappingContext : public ::phi::ArgumentMappingContext {

   bool IsSelectedRowsInputs(const std::string& name) const override;

+  bool IsSparseCooTensorInput(const std::string& name) const override;
+
   bool IsDenseTensorVectorInput(const std::string& name) const override;

   bool IsDenseTensorOutput(const std::string& name) const override;
diff --git a/paddle/fluid/operators/controlflow/feed_op.cc b/paddle/fluid/operators/controlflow/feed_op.cc
index 4cef104496510f4c75d2857cadc1538eb10476cc..e36ddace5b6e1147e2b712f8c15e5448612d7931 100644
--- a/paddle/fluid/operators/controlflow/feed_op.cc
+++ b/paddle/fluid/operators/controlflow/feed_op.cc
@@ -11,6 +11,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/phi/core/tensor_utils.h"

 namespace paddle {
 namespace framework {
@@ -61,6 +62,22 @@ class FeedVariableVisitor {
     *out_str = in_str;
   }

+  void operator()(const phi::SparseCooTensor &in_tensor) const {
+    phi::SparseCooTensor *out_tensor =
+        out_var_->GetMutable<phi::SparseCooTensor>();
+    if (platform::is_same_place(in_tensor.place(), place_)) {
+      *out_tensor = in_tensor;
+    } else {
+      platform::DeviceContext *context =
+          platform::DeviceContextPool::Instance().Get(place_);
+
+      phi::DenseTensor indices, values;
+      framework::TensorCopy(in_tensor.indices(), place_, *context, &indices);
+      framework::TensorCopy(in_tensor.values(), place_, *context, &values);
+      out_tensor->SetMember(indices, values, in_tensor.meta());
+    }
+  }
+
  private:
   framework::Variable *out_var_;
   const platform::Place &place_;
diff --git a/paddle/fluid/operators/controlflow/fetch_op.cc b/paddle/fluid/operators/controlflow/fetch_op.cc
index c1ed46867f1aca7071ed9e8d55fb21ab7ea1347e..7f179f9d97b968f09497fa914f844d4b91290cd1 100644
--- a/paddle/fluid/operators/controlflow/fetch_op.cc
+++ b/paddle/fluid/operators/controlflow/fetch_op.cc
@@ -123,6 +123,9 @@ class FetchOp : public framework::OperatorBase {
       auto &src_item = fetch_var->Get<framework::Vocab>();
       auto *dst_item = &(PADDLE_GET(framework::Vocab, fetch_list->at(col)));
       *dst_item = src_item;
+    } else if (fetch_var->IsType<phi::SparseCooTensor>()) {
+      auto &src_item = fetch_var->Get<phi::SparseCooTensor>();
+      fetch_list->at(col) = src_item;
     } else {
       auto &src_item = fetch_var->Get<framework::LoDTensorArray>();
       framework::LoDTensorArray tmp(src_item.size());
diff --git a/paddle/fluid/operators/controlflow/fetch_v2_op.cc b/paddle/fluid/operators/controlflow/fetch_v2_op.cc
index 64489c294d1233a710f0e318fec2d6676f7c7132..02af91100c25a5502e7aee80e8e1b2260787c345 100644
--- a/paddle/fluid/operators/controlflow/fetch_v2_op.cc
+++ b/paddle/fluid/operators/controlflow/fetch_v2_op.cc
@@ -98,6 +98,12 @@ class FetchV2Op : public framework::OperatorWithKernel {
         return framework::OpKernelType(framework::proto::VarType::FP32,
                                        platform::CPUPlace());
       }
+    } else if (fetch_var->IsType<phi::SparseCooTensor>()) {
+      auto &src_item = fetch_var->Get<phi::SparseCooTensor>();
+      if (!src_item.initialized()) {
+        return framework::OpKernelType(framework::proto::VarType::FP32,
+                                       platform::CPUPlace());
+      }
     } else {
       auto &src_item = fetch_var->Get<framework::LoDTensorArray>();
       if (src_item.empty() || !src_item[0].IsInitialized()) {
@@ -163,6 +169,12 @@ class FetchV2Kernel {
         dst_item->ShareDataWith(src_item);
         dst_item->set_lod(src_item.lod());
       }
+    } else if (fetch_var->IsType<phi::SparseCooTensor>()) {
+      auto &src_item = fetch_var->Get<phi::SparseCooTensor>();
+      if (!src_item.initialized()) {
+        return;
+      }
+      fetch_list->at(col) = src_item;
     } else {
       auto &src_item = fetch_var->Get<framework::LoDTensorArray>();
       framework::LoDTensorArray tmp(src_item.size());
diff --git a/paddle/fluid/operators/sparse_manual_op.cc b/paddle/fluid/operators/sparse_manual_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..e2ed1ed0ff2314a2462bf53134c3584cd9815ba2
--- /dev/null
+++ b/paddle/fluid/operators/sparse_manual_op.cc
@@ -0,0 +1,226 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/framework/infershape_utils.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/binary.h"
+#include "paddle/phi/infermeta/sparse/binary.h"
+#include "paddle/phi/infermeta/sparse/unary.h"
+#include "paddle/phi/infermeta/unary.h"
+
+namespace paddle {
+namespace operators {
+
+class SparseSparseCooTensorOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("values", "(Tensor), input 0 of sparse_coo_tensor op.");
+    AddInput("indices", "(Tensor), input 1 of sparse_coo_tensor op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_coo_tensor op.");
+    AddAttr<std::vector<int64_t>>(
+        "dense_shape",
+        "(vector<int64_t>), attribute 0 for sparse_coo_tensor op.");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_coo_tensor op.
+)DOC");
+  }
+};
+
+class SparseSparseCooTensorOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(
+    sparse_sparse_coo_tensor,
+    SparseSparseCooTensorInferShapeFunctor,
+    PD_INFER_META(phi::sparse::SparseCooTensorInferMeta));
+
+class SparseValuesOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("x", "(Tensor), input 0 of sparse_values op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_values op.");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_values op.
+)DOC");
+  }
+};
+
+class SparseValuesOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(sparse_values,
+                            SparseValuesInferShapeFunctor,
+                            PD_INFER_META(phi::sparse::ValuesInferMeta));
+
+class SparseIndicesOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("x", "(Tensor), input 0 of sparse_indices op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_indices op.");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_indices op.
+)DOC");
+  }
+};
+
+class SparseIndicesOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(sparse_indices,
+                            SparseIndicesInferShapeFunctor,
+                            PD_INFER_META(phi::sparse::IndicesInferMeta));
+
+class SparseToDenseOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("x", "(Tensor), input 0 of sparse_to_dense op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_to_dense op.");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_to_dense op.
+)DOC");
+  }
+};
+
+class SparseToDenseOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(sparse_to_dense,
+                            SparseToDenseInferShapeFunctor,
+                            PD_INFER_META(phi::UnchangedInferMeta));
+
+class SparseReluOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("x", "(Tensor), input 0 of sparse_relu op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_relu op.");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_relu op.
+)DOC");
+  }
+};
+
+class SparseReluOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(sparse_relu,
+                            SparseReluInferShapeFunctor,
+                            PD_INFER_META(phi::UnchangedInferMeta));
+
+class SparseConv3dOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("x", "(Tensor), input 0 of sparse_conv3d op.");
+    AddInput("kernel", "(Tensor), input 1 of sparse_conv3d op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_conv3d op.");
+    AddOutput("rulebook", "(Tensor), output 1 of sparse_conv3d op.");
+    AddOutput("counter", "(Tensor), output 2 of sparse_conv3d op.");
+    AddAttr<std::vector<int>>(
+        "paddings", "(vector<int>), attribute 0 for sparse_conv3d op.");
+    AddAttr<std::vector<int>>(
+        "dilations", "(vector<int>), attribute 1 for sparse_conv3d op.");
+    AddAttr<std::vector<int>>(
+        "strides", "(vector<int>), attribute 2 for sparse_conv3d op.");
+    AddAttr<int>("groups", "(int), attribute 3 for sparse_conv3d op.");
+    AddAttr<bool>("subm", "(bool), attribute 4 for conv3d_coo op.");
+    AddAttr<std::string>("key", "(string), attribute 5 for sparse_conv3d op.")
+        .SetDefault("");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_conv3d op.
+)DOC");
+  }
+};
+
+class SparseConv3dOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(sparse_conv3d,
+                            SparseConv3dInferShapeFunctor,
+                            PD_INFER_META(phi::sparse::Conv3dInferMeta));
+
+class SparseAddOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("x", "(Tensor), input 0 of sparse_add op.");
+    AddInput("y", "(Tensor), input 1 of sparse_add op.");
+    AddOutput("out", "(Tensor), output 0 of sparse_add op.");
+    AddComment(R"DOC(
+TODO: Documentation of sparse_add op.
+)DOC");
+  }
+};
+
+class SparseAddOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+};
+
+DECLARE_INFER_SHAPE_FUNCTOR(sparse_add,
+                            SparseAddInferShapeFunctor,
+                            PD_INFER_META(phi::UnchangedInferMeta));
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OPERATOR(sparse_sparse_coo_tensor,
+                  ops::SparseSparseCooTensorOp,
+                  ops::SparseSparseCooTensorOpMaker,
+                  ops::SparseSparseCooTensorInferShapeFunctor);
+
+REGISTER_OPERATOR(sparse_values,
+                  ops::SparseValuesOp,
+                  ops::SparseValuesOpMaker,
+                  ops::SparseValuesInferShapeFunctor);
+
+REGISTER_OPERATOR(sparse_indices,
+                  ops::SparseIndicesOp,
+                  ops::SparseIndicesOpMaker,
+                  ops::SparseIndicesInferShapeFunctor);
+
+REGISTER_OPERATOR(sparse_to_dense,
+                  ops::SparseToDenseOp,
+                  ops::SparseToDenseOpMaker,
+                  ops::SparseToDenseInferShapeFunctor);
+
+REGISTER_OPERATOR(sparse_relu,
+                  ops::SparseReluOp,
+                  ops::SparseReluOpMaker,
+                  ops::SparseReluInferShapeFunctor);
+
+REGISTER_OPERATOR(sparse_conv3d,
+                  ops::SparseConv3dOp,
+                  ops::SparseConv3dOpMaker,
+                  ops::SparseConv3dInferShapeFunctor);
+
+REGISTER_OPERATOR(sparse_add,
+                  ops::SparseAddOp,
+                  ops::SparseAddOpMaker,
+                  ops::SparseAddInferShapeFunctor);
diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc
index ab725575351ea7021c193d30f2dc9cd6dfff0e8d..c68898ea6a6f27ae35ce1aa405ce2d4ebc400af3 100644
--- a/paddle/fluid/pybind/protobuf.cc
+++ b/paddle/fluid/pybind/protobuf.cc
@@ -275,7 +275,8 @@ void BindVarDsec(pybind11::module *m) {
       .value("RAW", pd::proto::VarType::RAW)
       .value("STRING", pd::proto::VarType::STRING)
       .value("STRINGS", pd::proto::VarType::STRINGS)
-      .value("VOCAB", pd::proto::VarType::VOCAB);
+      .value("VOCAB", pd::proto::VarType::VOCAB)
+      .value("SPARSE_COO", pd::proto::VarType::SPARSE_COO);
 }

 void BindOpDesc(pybind11::module *m) {
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 7786d3c17d9bcd9de907a3b41f22f6a0a780a587..928697094de36309878bf1964c9a028441a19d83 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1937,6 +1937,9 @@ All parameter, weight, gradient are variables in Paddle.
            if (data_is_lod_tensor(self[i])) {
              auto &data = PADDLE_GET(LoDTensor, self[i]);
              res[i] = py::cast(std::move(data));
+           } else if (data_is_sparse_coo_tensor(self[i])) {
+             auto &data = PADDLE_GET(phi::SparseCooTensor, self[i]);
+             res[i] = py::cast(std::move(data));
            } else {
              auto &data = PADDLE_GET(LoDTensorArray, self[i]);
              py::list tmp(data.size());
diff --git a/paddle/fluid/pybind/tensor.cc b/paddle/fluid/pybind/tensor.cc
index 8152a11c8193a5f5df85b32ec001b107d38712de..4194a32da08f95c91ab50da3fb6687522818ab4c 100644
--- a/paddle/fluid/pybind/tensor.cc
+++ b/paddle/fluid/pybind/tensor.cc
@@ -1105,6 +1105,20 @@ void BindTensor(pybind11::module &m) {  // NOLINT
             std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows));
             return new_rows;
           });
+
+  py::class_<phi::SparseCooTensor>(m, "SparseCooTensor")
+      .def("__init__",
+           [](phi::SparseCooTensor &instance) {
+             new (&instance) phi::SparseCooTensor();
+           })
+      .def("numel",
+           [](const phi::SparseCooTensor &self) -> int64_t {
+             return self.numel();
+           })
+      .def("indices",
+           [](const phi::SparseCooTensor &self) -> framework::Tensor {
+             return self.indices();
+           });
 }

 }  // namespace pybind
diff --git a/paddle/phi/core/compat/arg_map_context.h b/paddle/phi/core/compat/arg_map_context.h
index 206ad5d96f67560cb43fb9d8c5f05d2f187086b8..3ec35c40c8e5baa360e6bf986ecd2002b70fb0b8 100644
--- a/paddle/phi/core/compat/arg_map_context.h
+++ b/paddle/phi/core/compat/arg_map_context.h
@@ -109,6 +109,7 @@ class ArgumentMappingContext {
   virtual bool IsDenseTensorInputs(const std::string& name) const = 0;
   virtual bool IsSelectedRowsInput(const std::string& name) const = 0;
   virtual bool IsSelectedRowsInputs(const std::string& name) const = 0;
+  virtual bool IsSparseCooTensorInput(const std::string& name) const = 0;

   // For compatibility with LoDTensorArray
   virtual bool IsDenseTensorVectorInput(const std::string& name) const = 0;
diff --git a/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc b/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
index 0e46efc0e867377c9a59cd7b42c080367e6e21ca..3addd4bbbfbb0ecb107b0790e6ae70fdf4cb0cd4 100644
--- a/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_meta.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/core/visit_type.h"
 #include "paddle/phi/kernels/elementwise_add_kernel.h"
 #include "paddle/phi/kernels/elementwise_kernel.h"
diff --git a/paddle/phi/ops/compat/sparse_manual_op_sig.cc b/paddle/phi/ops/compat/sparse_manual_op_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..45f8a417a191433368f9241b74d27e9da7af872b
--- /dev/null
+++ b/paddle/phi/ops/compat/sparse_manual_op_sig.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+namespace phi {
+
+// TODO(zhangkaihuo): add csr op
+
+KernelSignature SparseSparseCooTensorOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature(
+      "sparse_coo_tensor", {"values", "indices"}, {"dense_shape"}, {"out"});
+}
+
+KernelSignature SparseValuesOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  if (ctx.IsSparseCooTensorInput("x")) {
+    return KernelSignature("values_coo", {"x"}, {}, {"out"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+KernelSignature SparseIndicesOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  if (ctx.IsSparseCooTensorInput("x")) {
+    return KernelSignature("indices_coo", {"x"}, {}, {"out"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+KernelSignature SparseToDenseOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  if (ctx.IsSparseCooTensorInput("x")) {
+    return KernelSignature("coo_to_dense", {"x"}, {}, {"out"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+KernelSignature SparseReluOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  if (ctx.IsSparseCooTensorInput("x")) {
+    return KernelSignature("relu_coo", {"x"}, {}, {"out"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+KernelSignature SparseConv3dOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  if (ctx.IsSparseCooTensorInput("x")) {
+    return KernelSignature(
+        "conv3d_coo",
+        {"x", "kernel"},
+        {"paddings", "dilations", "strides", "groups", "subm", "key"},
+        {"out", "rulebook", "counter"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+KernelSignature SparseAddOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  if (ctx.IsSparseCooTensorInput("x") && ctx.IsSparseCooTensorInput("y")) {
+    return KernelSignature("add_coo_coo", {"x", "y"}, {}, {"out"});
+  } else if (ctx.IsSparseCooTensorInput("x") && ctx.IsDenseTensorInput("y")) {
+    return KernelSignature("add_coo_dense", {"x", "y"}, {}, {"out"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+}  // namespace phi
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_sparse_coo_tensor, sparse_coo_tensor);
+PD_REGISTER_ARG_MAPPING_FN(sparse_sparse_coo_tensor,
+                           phi::SparseSparseCooTensorOpArgumentMapping);
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_values, values_coo);
+PD_REGISTER_ARG_MAPPING_FN(sparse_values, phi::SparseValuesOpArgumentMapping);
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_indices, indices_coo);
+PD_REGISTER_ARG_MAPPING_FN(sparse_indices, phi::SparseIndicesOpArgumentMapping);
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_to_dense, coo_to_dense);
+PD_REGISTER_ARG_MAPPING_FN(sparse_to_dense,
+                           phi::SparseToDenseOpArgumentMapping);
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_relu, relu_coo);
+PD_REGISTER_ARG_MAPPING_FN(sparse_relu, phi::SparseReluOpArgumentMapping);
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_conv3d, conv3d_coo);
+PD_REGISTER_ARG_MAPPING_FN(sparse_conv3d, phi::SparseConv3dOpArgumentMapping);
+
+PD_REGISTER_BASE_KERNEL_NAME(sparse_add, add_coo_coo);
+PD_REGISTER_ARG_MAPPING_FN(sparse_add, phi::SparseAddOpArgumentMapping);
diff --git a/paddle/phi/tests/ops/test_op_signature.h b/paddle/phi/tests/ops/test_op_signature.h
index 426bb6b1243d9548cd05c1a91bfded48fd962ef2..1b067c0aa17e58f929b12a322a0804272bb00b44 100644
--- a/paddle/phi/tests/ops/test_op_signature.h
+++ b/paddle/phi/tests/ops/test_op_signature.h
@@ -86,6 +86,10 @@ class TestArgumentMappingContext : public phi::ArgumentMappingContext {
     return false;
   }

+  bool IsSparseCooTensorInput(const std::string& name) const override {
+    return false;
+  }
+
   bool IsDenseTensorOutput(const std::string& name) const override {
     return dense_tensor_outputs.count(name) > 0;
   }
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 3e4465ca7ab12b7746e3d1f1e05c2b9b2fd83f1c..08ee989355368c11aaeadc58b7f6bda133f3143e 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1377,6 +1377,9 @@ class Variable(object):
                 type = core.VarDesc.VarType.STRINGS
                 lod_level = None

+        if type == core.VarDesc.VarType.SPARSE_COO:
+            lod_level = None
+
         self.belong_to_optimizer = belong_to_optimizer

         self.error_clip = error_clip
diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py
index ef3ccaa57a3ab47853fdb763d90d559a1960caf5..3a688cf9f444f21fe45514c568bc065202d9ac78 100644
--- a/python/paddle/fluid/layer_helper_base.py
+++ b/python/paddle/fluid/layer_helper_base.py
@@ -408,6 +408,30 @@ class LayerHelperBase(object):
             persistable=False,
             stop_gradient=stop_gradient)

+    def create_sparse_variable_for_type_inference(self,
+                                                  dtype,
+                                                  stop_gradient=False,
+                                                  shape=None):
+        """Create a temporary sparse variable whose type will be inferred.
+
+        Note:
+            The default type will be set to SPARSE_COO. However, when
+            the var is used as operator output, its type will be updated
+            based on the operator's `VarTypeInference` implementation in
+            infer_var_type.
+        """
+        # set global dtype
+        if not dtype:
+            dtype = self.__dtype
+        return self.main_program.current_block().create_var(
+            name=unique_name.generate_with_ignorable_key(".".join(
+                [self.name, 'tmp'])),
+            dtype=dtype,
+            shape=shape,
+            type=core.VarDesc.VarType.SPARSE_COO,
+            persistable=False,
+            stop_gradient=stop_gradient)
+
     def create_variable(self, *args, **kwargs):
         """Create Variable for this layers.
            Returns created Variable.
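create_sparse_variable_for_type_inference is the sparse twin of create_variable_for_type_inference, and every static-mode wrapper in this diff follows the same LayerHelper pattern. A sketch of that pattern for a hypothetical wrapper (the op name 'sparse_relu' is one of the manually registered ops from sparse_manual_op.cc above):

    # hypothetical helper illustrating the append_op pattern
    from paddle.fluid.layer_helper import LayerHelper

    def my_sparse_relu(x):
        helper = LayerHelper('sparse_relu')
        # output declared as SPARSE_COO so infer-shape/infer-var-type see it
        out = helper.create_sparse_variable_for_type_inference(x.dtype)
        helper.append_op(type='sparse_relu', inputs={'x': x},
                         outputs={'out': out}, attrs={})
        return out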
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 4e5643327dd13b51b16c17d59883a402748c2df0..a0fd6782db6331c5f8811b2ce86f1934867b2b84 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -78,6 +78,10 @@ def monkey_patch_variable():
         tmp_name = unique_tmp_name()
         return block.create_var(name=tmp_name, dtype=dtype)

+    def create_new_tmp_sparse_var(block, dtype, type):
+        tmp_name = unique_tmp_name()
+        return block.create_var(name=tmp_name, dtype=dtype, type=type)
+
     def create_tensor(block, value, dtype, shape):
         value = float(value)
         var = create_new_tmp_var(block, dtype)
@@ -431,6 +435,33 @@ def monkey_patch_variable():
         __impl__.__name__ = method_name
         return __impl__

+    def values(var):
+        block = current_block(var)
+        out = create_new_tmp_var(block, var.dtype)
+        block.append_op(type="sparse_values",
+                        inputs={"x": [var]},
+                        outputs={"out": [out]},
+                        attrs={})
+        return out
+
+    def indices(var):
+        block = current_block(var)
+        out = create_new_tmp_var(block, var.dtype)
+        block.append_op(type="sparse_indices",
+                        inputs={"x": [var]},
+                        outputs={"out": [out]},
+                        attrs={})
+        return out
+
+    def to_dense(var):
+        block = current_block(var)
+        out = create_new_tmp_var(block, var.dtype)
+        block.append_op(type="sparse_to_dense",
+                        inputs={"x": [var]},
+                        outputs={"out": [out]},
+                        attrs={})
+        return out
+
     variable_methods = [
         #   b=-a
         ('__neg__', _neg_),
@@ -483,7 +514,10 @@ def monkey_patch_variable():
         ('__lt__', _binary_creator_('__lt__', 'less_than', False, None)),
         ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
         ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
-        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None))
+        ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
+        ('values', values),
+        ('indices', indices),
+        ('to_dense', to_dense),
     ]

     global _already_patch_variable
diff --git a/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py b/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
index c92c0d7b2f8d797d6e103bf8212031a97aea0b0c..9d5fbb56466c54147c8a8bcc82ce67f3a5d4936a 100644
--- a/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py
@@ -18,6 +18,7 @@ import paddle
 from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid import core
 from paddle.fluid.framework import _test_eager_guard
+import paddle.incubate.sparse as sparse


 class TestSparseConv(unittest.TestCase):
@@ -158,3 +159,66 @@ class TestSparseConv(unittest.TestCase):
                 sp_conv3d.bias.grad.numpy(),
                 atol=1e-5,
                 rtol=1e-5)
+
+
+class TestStatic(unittest.TestCase):
+
+    def test(self):
+        paddle.enable_static()
+        indices = paddle.static.data(name='indices',
+                                     shape=[4, 4],
+                                     dtype='int32')
+        values = paddle.static.data(name='values',
+                                    shape=[4, 1],
+                                    dtype='float32')
+        dense_shape = [1, 1, 3, 4, 1]
+        sp_x = sparse.sparse_coo_tensor(indices, values, dense_shape)
+
+        weight_shape = [1, 3, 3, 1, 1]
+        weight = paddle.static.data(name='weight',
+                                    shape=weight_shape,
+                                    dtype='float32')
+        bias_shape = [1]
+        bias = paddle.static.data(name='bias',
+                                  shape=bias_shape,
+                                  dtype='float32')
+        out = sparse.nn.functional.conv3d(sp_x,
+                                          weight,
+                                          bias,
+                                          stride=1,
+                                          padding=0,
+                                          dilation=1,
+                                          groups=1,
+                                          data_format="NDHWC")
+        sp_out = sparse.nn.functional.relu(out)
+        out_indices = sp_out.indices()
+        out_values = sp_out.values()
+        out = sp_out.to_dense()
+
+        exe = paddle.static.Executor()
+
+        indices_data = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
+        values_data = [[1.0], [2.0], [3.0], [4.0]]
+        weight_data = np.array([[[[[1], [1], [1]], [[1], [1], [1]],
+                                  [[1], [1], [1]]]]]).astype('float32')
+        weight_data = weight_data.reshape(weight_shape)
+        bias_data = np.array([1]).astype('float32')
+
+        fetch = exe.run(feed={
+            'indices': indices_data,
+            'values': values_data,
+            'weight': weight_data,
+            'bias': bias_data
+        },
+                        fetch_list=[out, out_indices, out_values],
+                        return_numpy=True)
+        correct_out = np.array([[[[[5.0], [11.0]]]]]).astype('float64')
+        correct_out_values = [[5.0], [11.0]]
+        assert np.array_equal(correct_out, fetch[0])
+        assert np.array_equal(correct_out_values, fetch[2])
+        assert out_indices.dtype == paddle.int32
+        paddle.disable_static()
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/incubate/sparse/binary.py b/python/paddle/incubate/sparse/binary.py
index 5a6a77490383285046a0fb78e361a12148e09d6f..a39a0a756d78f331dde75514002807d79f2a73e5 100644
--- a/python/paddle/incubate/sparse/binary.py
+++ b/python/paddle/incubate/sparse/binary.py
@@ -14,6 +14,9 @@

 from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.framework import dygraph_only, core
+from paddle import in_dynamic_mode
+from paddle.fluid.layer_helper import LayerHelper
+from .unary import cast

 __all__ = []

@@ -254,7 +257,19 @@ def add(x, y, name=None):
     """
     if y.dtype != x.dtype:
         y = cast(y, None, x.dtype)
-    return _C_ops.sparse_add(x, y)
+
+    if in_dynamic_mode():
+        return _C_ops.sparse_add(x, y)
+    else:
+        op_type = 'sparse_add'
+        inputs = {'x': x, 'y': y}
+        helper = LayerHelper(op_type)
+        out = helper.create_sparse_variable_for_type_inference(x.dtype)
+        helper.append_op(type=op_type,
+                         inputs=inputs,
+                         outputs={'out': out},
+                         attrs={})
+        return out


 @dygraph_only
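After monkey_patch_variable runs, values(), indices(), and to_dense() are ordinary methods on every static-graph Variable, so a SPARSE_COO variable can be unpacked without going through the functional API. A short sketch:

    import paddle
    import paddle.incubate.sparse as sparse

    paddle.enable_static()
    indices = paddle.static.data(name='indices', shape=[1, 2], dtype='int32')
    values = paddle.static.data(name='values', shape=[2], dtype='float32')
    sp = sparse.sparse_coo_tensor(indices, values, [3])
    # each call appends the corresponding sparse_* op to the current block
    v, i, d = sp.values(), sp.indices(), sp.to_dense()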
diff --git a/python/paddle/incubate/sparse/creation.py b/python/paddle/incubate/sparse/creation.py
index 2fe5a560d662623496d827e7ee36380beb63d473..72f4b3fba610e3ec60cbc0f81254a242f926cc85 100644
--- a/python/paddle/incubate/sparse/creation.py
+++ b/python/paddle/incubate/sparse/creation.py
@@ -18,6 +18,8 @@ from paddle.fluid.framework import core, dygraph_only
 from paddle.fluid.framework import _current_expected_place, _get_paddle_place
 from paddle.tensor import to_tensor, max
 from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
+from paddle import in_dynamic_mode
+from paddle.fluid.layer_helper import LayerHelper

 import numpy as np

@@ -64,7 +66,6 @@ def _check_indices_dtype(dtype):
     )


-@dygraph_only
 def sparse_coo_tensor(indices,
                       values,
                       shape=None,
@@ -114,53 +115,68 @@ def sparse_coo_tensor(indices,
             #         values=[1., 2., 3.])
     """
-    place = _get_place(place)
+    if in_dynamic_mode():
+        place = _get_place(place)

-    if not isinstance(indices, core.eager.Tensor):
-        indices = to_tensor(indices,
-                            dtype=None,
-                            place=place,
-                            stop_gradient=True)
-    if not isinstance(values, core.eager.Tensor):
-        values = to_tensor(values, dtype, place, stop_gradient)
-    if len(indices.shape) != 2:
-        raise ValueError("'indices' must be 2-D.")
+        if not isinstance(indices, core.eager.Tensor):
+            indices = to_tensor(indices,
+                                dtype=None,
+                                place=place,
+                                stop_gradient=True)
+        if not isinstance(values, core.eager.Tensor):
+            values = to_tensor(values, dtype, place, stop_gradient)
+        if len(indices.shape) != 2:
+            raise ValueError("'indices' must be 2-D.")

-    nnz = indices.shape[1]
-    sparse_dim = indices.shape[0]
+        nnz = indices.shape[1]
+        sparse_dim = indices.shape[0]

-    _check_indices_dtype(indices.dtype)
+        _check_indices_dtype(indices.dtype)

-    if nnz != values.shape[0]:
-        raise ValueError(
-            "the indices and values must have same number of non-zero, but get {} and {}"
-            .format(nnz, values.shape[0]))
+        if nnz != values.shape[0]:
+            raise ValueError(
+                "the indices and values must have same number of non-zero, but get {} and {}"
+                .format(nnz, values.shape[0]))

-    dense_dim = len(values.shape) - 1
+        dense_dim = len(values.shape) - 1

-    if not indices.place._equals(place):
-        indices = indices._copy_to(place, False)
+        if not indices.place._equals(place):
+            indices = indices._copy_to(place, False)

-    if not values.place._equals(place):
-        values = values._copy_to(place, False)
-    values = _handle_dtype(values, dtype)
-    values.stop_gradient = stop_gradient
+        if not values.place._equals(place):
+            values = values._copy_to(place, False)
+        values = _handle_dtype(values, dtype)
+        values.stop_gradient = stop_gradient

-    min_shape = _infer_dense_shape(indices, values)
+        min_shape = _infer_dense_shape(indices, values)

-    if shape is None:
-        shape = min_shape
-    else:
-        if shape < min_shape:
-            raise ValueError(
-                "the minimun shape required is {}, but get {}".format(
-                    min_shape, shape))
-        if len(shape) != sparse_dim + dense_dim:
-            raise ValueError(
-                "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}"
-                .format(sparse_dim, dense_dim, len(shape)))
+        if shape is None:
+            shape = min_shape
+        else:
+            if shape < min_shape:
+                raise ValueError(
+                    "the minimum shape required is {}, but get {}".format(
+                        min_shape, shape))
+            if len(shape) != sparse_dim + dense_dim:
+                raise ValueError(
+                    "the number of dimensions (len(shape)) must be sparse_dim({}) + dense_dim({}), but get {}"
+                    .format(sparse_dim, dense_dim, len(shape)))
+
+        return _C_ops.sparse_sparse_coo_tensor(values, indices, shape)

-    return _C_ops.sparse_sparse_coo_tensor(values, indices, shape)
+    else:
+        op_type = 'sparse_sparse_coo_tensor'
+        inputs = {'values': values, 'indices': indices}
+        if shape[0] is None:
+            shape[0] = -1
+        attrs = {'dense_shape': shape}
+        helper = LayerHelper(op_type)
+        out = helper.create_sparse_variable_for_type_inference(dtype)
+        helper.append_op(type=op_type,
+                         inputs=inputs,
+                         outputs={'out': out},
+                         attrs=attrs)
+        return out


 #TODO: need to support shape is None
diff --git a/python/paddle/incubate/sparse/nn/functional/activation.py b/python/paddle/incubate/sparse/nn/functional/activation.py
index 4faaa696e6dd5ca88e29cf0535b4a4715bc108e0..7cc6920b3904f33d4e5dec44167bbd25f0e41303 100644
--- a/python/paddle/incubate/sparse/nn/functional/activation.py
+++ b/python/paddle/incubate/sparse/nn/functional/activation.py
@@ -16,9 +16,10 @@ __all__ = []

 from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.framework import dygraph_only
+from paddle import in_dynamic_mode
+from paddle.fluid.layer_helper import LayerHelper


-@dygraph_only
 def relu(x, name=None):
     """
     sparse relu activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
@@ -45,7 +46,17 @@ def relu(x, name=None):
             out = paddle.incubate.sparse.nn.functional.relu(sparse_x)
             # [0., 0., 1.]
     """
-    return _C_ops.sparse_relu(x)
+    if in_dynamic_mode():
+        return _C_ops.sparse_relu(x)
+    else:
+        op_type = 'sparse_relu'
+        helper = LayerHelper(op_type)
+        out = helper.create_sparse_variable_for_type_inference(x.dtype)
+        helper.append_op(type=op_type,
+                         inputs={'x': x},
+                         outputs={'out': out},
+                         attrs={})
+        return out


 @dygraph_only
diff --git a/python/paddle/incubate/sparse/nn/functional/conv.py b/python/paddle/incubate/sparse/nn/functional/conv.py
index 284ce1020bbf242bd0ad8c6fbc88fd5d4f373b86..d8cece7cdef38d63cff9e94e33d075dce4eebe57 100644
--- a/python/paddle/incubate/sparse/nn/functional/conv.py
+++ b/python/paddle/incubate/sparse/nn/functional/conv.py
@@ -19,8 +19,8 @@ from paddle.fluid.layers.utils import convert_to_list
 from paddle.fluid.layers.nn import elementwise_add
 from ...creation import sparse_coo_tensor
 from ...binary import add
-from paddle.tensor import arange
 from paddle.nn.functional.conv import _update_padding_nd
+from paddle.fluid.layer_helper import LayerHelper


 def _conv3d(x,
@@ -34,7 +34,6 @@ def _conv3d(x,
             key=None,
             data_format="NDHWC",
             name=None):
-    assert in_dynamic_mode(), "Currently, only support dynamic mode"
     assert groups == 1, "Currently, only support groups=1"

     dims = 3
@@ -63,15 +62,41 @@ def _conv3d(x,
     padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
     stride = convert_to_list(stride, dims, 'stride')
     dilation = convert_to_list(dilation, dims, 'dilation')
-    op_type = "conv3d"

-    pre_bias = _C_ops.sparse_conv3d(x, weight, padding, dilation, stride,
-                                    groups, subm,
-                                    key if key is not None else "")
-    if bias is not None:
-        return add(pre_bias, bias)
+    if in_dynamic_mode():
+        pre_bias = _C_ops.sparse_conv3d(x, weight, padding, dilation, stride,
+                                        groups, subm,
+                                        key if key is not None else "")
+        if bias is not None:
+            return add(pre_bias, bias)
+        else:
+            return pre_bias
     else:
-        return pre_bias
+        inputs = {'x': x, 'kernel': weight}
+        attrs = {
+            'paddings': padding,
+            'dilations': dilation,
+            'strides': stride,
+            'groups': groups,
+            'subm': subm,
+            'key': key
+        }
+        op_type = 'sparse_conv3d'
+        helper = LayerHelper(op_type, **locals())
+        rulebook = helper.create_variable_for_type_inference(dtype='int32',
+                                                             stop_gradient=True)
+        counter = helper.create_variable_for_type_inference(dtype='int32',
+                                                            stop_gradient=True)
+        pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
+        outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter}
+        helper.append_op(type=op_type,
+                         inputs=inputs,
+                         outputs=outputs,
+                         attrs=attrs)
+        if bias is not None:
+            return add(pre_bias, bias)
+        else:
+            return pre_bias


 def conv3d(x,