Unverified · Commit 61a933ac, authored by HongyuJia, committed by GitHub

[Polish Namespace] Polish operants namespace (#50420)

* polish namespace

* change static_tensor_operants

* polish namespace
Parent 78eb2d87
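The diff below renames the operants namespaces: OperantsManager now lives directly under paddle:: (its header moves from paddle/phi/core to paddle/phi/api/include), EagerTensorOperants and StaticTensorOperants move from paddle::operants to paddle::prim, while PhiTensorOperants stays in paddle::operants. As a minimal before/after sketch of a call site (the wrapper function InitOperantsExample is hypothetical; the class and member names are taken from the diff):

// Sketch only; assumes a Paddle build after this commit.
#include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
#include "paddle/phi/api/include/operants_manager.h"  // was paddle/phi/core/operants_manager.h

void InitOperantsExample() {
  // Before: paddle::operants::OperantsManager::Instance().eager_operants.reset(
  //             new paddle::operants::EagerTensorOperants());
  // After:
  paddle::OperantsManager::Instance().eager_operants.reset(
      new paddle::prim::EagerTensorOperants());
}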
@@ -46,8 +46,8 @@ limitations under the License. */
 #endif
 
 #include "gflags/gflags.h"
+#include "paddle/phi/api/include/operants_manager.h"
 #include "paddle/phi/api/include/tensor_operants.h"
-#include "paddle/phi/core/operants_manager.h"
 
 DECLARE_string(tensor_operants_mode);
@@ -278,9 +278,8 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
   VLOG(3) << "Custom Operator: Run ComputeFunc.";
   FLAGS_tensor_operants_mode = "phi";
-  if (paddle::operants::OperantsManager::Instance().phi_operants.get() ==
-      nullptr) {
-    paddle::operants::OperantsManager::Instance().phi_operants.reset(
+  if (paddle::OperantsManager::Instance().phi_operants.get() == nullptr) {
+    paddle::OperantsManager::Instance().phi_operants.reset(
         new paddle::operants::PhiTensorOperants());
     VLOG(4) << "Initialize phi tensor operants successfully";
   }
...
@@ -15,16 +15,16 @@
 #include "paddle/fluid/prim/tests/init_env_utils.h"
 #include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
 #include "paddle/fluid/prim/utils/static/static_tensor_operants.h"
-#include "paddle/phi/core/operants_manager.h"
+#include "paddle/phi/api/include/operants_manager.h"
 
 namespace paddle {
 namespace prim {
 
 void InitTensorOperants() {
-  paddle::operants::OperantsManager::Instance().eager_operants.reset(
-      new paddle::operants::EagerTensorOperants());
-  paddle::operants::OperantsManager::Instance().static_operants.reset(
-      new paddle::operants::StaticTensorOperants());
+  paddle::OperantsManager::Instance().eager_operants.reset(
+      new paddle::prim::EagerTensorOperants());
+  paddle::OperantsManager::Instance().static_operants.reset(
+      new paddle::prim::StaticTensorOperants());
 }
 
 } // namespace prim
...
@@ -23,9 +23,9 @@
 #include "paddle/fluid/prim/utils/static/desc_tensor.h"
 #include "paddle/fluid/prim/utils/static/static_tensor_operants.h"
 #include "paddle/fluid/prim/utils/utils.h"
+#include "paddle/phi/api/include/operants_manager.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/core/operants_manager.h"
 
 DECLARE_bool(prim_enabled);
 DECLARE_string(tensor_operants_mode);
@@ -148,8 +148,8 @@ class TestCompositeGradMaker : public CompositeGradOpMakerBase {
 TEST(StaticPrim, TanhBackwardComposite) {
   // Initialized environment
   FLAGS_tensor_operants_mode = "static";
-  paddle::operants::OperantsManager::Instance().static_operants.reset(
-      new paddle::operants::StaticTensorOperants());
+  paddle::OperantsManager::Instance().static_operants.reset(
+      new paddle::prim::StaticTensorOperants());
   TestBaseProgram base_program = TestBaseProgram();
   auto* target_block = base_program.GetBlock(0);
@@ -234,8 +234,8 @@ TEST(StaticPrim, TanhBackwardComposite) {
 TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
   // Initialized environment
   FLAGS_tensor_operants_mode = "static";
-  paddle::operants::OperantsManager::Instance().static_operants.reset(
-      new paddle::operants::StaticTensorOperants());
+  paddle::OperantsManager::Instance().static_operants.reset(
+      new paddle::prim::StaticTensorOperants());
   TestBaseProgram base_program = TestBaseProgram();
   auto* target_block = base_program.GetBlock(0);
@@ -301,8 +301,8 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
 TEST(StaticCompositeGradMaker, TestMutiOutputMethod) {
   // Initialized environment
   FLAGS_tensor_operants_mode = "static";
-  paddle::operants::OperantsManager::Instance().static_operants.reset(
-      new paddle::operants::StaticTensorOperants());
+  paddle::OperantsManager::Instance().static_operants.reset(
+      new paddle::prim::StaticTensorOperants());
   TestBaseProgram base_program = TestBaseProgram();
   auto* target_block = base_program.GetBlock(0);
...
@@ -19,11 +19,11 @@
 namespace paddle {
-namespace operants {
+namespace prim {
 
 Tensor EagerTensorOperants::multiply(const Tensor& x, const Tensor& y) {
   return ::multiply_ad_func(x, y);
 }
 
-} // namespace operants
+} // namespace prim
 } // namespace paddle
@@ -14,13 +14,16 @@
 #pragma once
 
+#include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/macros.h"
-#include "paddle/phi/core/operants_base.h"
 
 namespace paddle {
-namespace operants {
+namespace prim {
+
+using Tensor = paddle::experimental::Tensor;
+using TensorOperantsBase = paddle::operants::TensorOperantsBase;
 
 class EagerTensorOperants : public TensorOperantsBase {
  public:
@@ -32,5 +35,5 @@ class EagerTensorOperants : public TensorOperantsBase {
   DISABLE_COPY_AND_ASSIGN(EagerTensorOperants);
 };
 
-} // namespace operants
+} // namespace prim
 } // namespace paddle
@@ -20,12 +20,12 @@
 namespace paddle {
-namespace operants {
+namespace prim {
 
 using DescTensor = paddle::prim::DescTensor;
 
 Tensor StaticTensorOperants::multiply(const Tensor& x, const Tensor& y) {
   return paddle::prim::multiply<DescTensor>(x, y);
 }
 
-} // namespace operants
+} // namespace prim
 } // namespace paddle
@@ -14,13 +14,16 @@
 #pragma once
 
+#include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/macros.h"
-#include "paddle/phi/core/operants_base.h"
 
 namespace paddle {
-namespace operants {
+namespace prim {
+
+using Tensor = paddle::experimental::Tensor;
+using TensorOperantsBase = paddle::operants::TensorOperantsBase;
 
 class StaticTensorOperants : public TensorOperantsBase {
  public:
@@ -32,5 +35,5 @@ class StaticTensorOperants : public TensorOperantsBase {
   DISABLE_COPY_AND_ASSIGN(StaticTensorOperants);
 };
 
-} // namespace operants
+} // namespace prim
 } // namespace paddle
@@ -61,8 +61,8 @@ typedef SSIZE_T ssize_t;
 #endif
 
 #include "gflags/gflags.h"
+#include "paddle/phi/api/include/operants_manager.h"
 #include "paddle/phi/api/include/tensor_operants.h"
-#include "paddle/phi/core/operants_manager.h"
 
 DECLARE_string(tensor_operants_mode);
@@ -503,10 +503,10 @@ static PyObject* eager_api_init_eager_and_static_tensor_operants(
     PyObject* self, PyObject* args, PyObject* kwargs) {
   EAGER_TRY
-  paddle::operants::OperantsManager::Instance().eager_operants.reset(
-      new paddle::operants::EagerTensorOperants());
-  paddle::operants::OperantsManager::Instance().static_operants.reset(
-      new paddle::operants::StaticTensorOperants());
+  paddle::OperantsManager::Instance().eager_operants.reset(
+      new paddle::prim::EagerTensorOperants());
+  paddle::OperantsManager::Instance().static_operants.reset(
+      new paddle::prim::StaticTensorOperants());
   VLOG(4) << "Initialize eager and static tensor operants successfully";
 
   RETURN_PY_NONE
@@ -518,9 +518,8 @@ static PyObject* eager_api_run_custom_op(PyObject* self,
                                          PyObject* kwargs) {
   EAGER_TRY
   FLAGS_tensor_operants_mode = "phi";
-  if (paddle::operants::OperantsManager::Instance().phi_operants.get() ==
-      nullptr) {
-    paddle::operants::OperantsManager::Instance().phi_operants.reset(
+  if (paddle::OperantsManager::Instance().phi_operants.get() == nullptr) {
+    paddle::OperantsManager::Instance().phi_operants.reset(
         new paddle::operants::PhiTensorOperants());
     VLOG(4) << "Initialize phi tensor operants successfully";
   }
...
@@ -14,15 +14,14 @@
 #pragma once
 
+#include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/macros.h"
-#include "paddle/phi/core/operants_base.h"
 
 namespace paddle {
-namespace operants {
 
 using Tensor = paddle::experimental::Tensor;
+using TensorOperantsBase = paddle::operants::TensorOperantsBase;
 
 /**
  * [ Why need OperantsManager? ]
@@ -73,5 +72,4 @@ class OperantsManager {
   DISABLE_COPY_AND_ASSIGN(OperantsManager);
 };
 
-} // namespace operants
 } // namespace paddle
@@ -14,9 +14,9 @@
 #pragma once
 
+#include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/macros.h"
-#include "paddle/phi/core/operants_base.h"
 
 namespace paddle {
...
@@ -316,3 +316,7 @@ cc_library(
   phi_tensor_operants
   SRCS tensor_operants.cc
   DEPS phi_function_api)
+cc_library(
+  operants_manager
+  SRCS operants_manager.cc
+  DEPS flags)
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/core/operants_manager.h"
+#include "paddle/phi/api/include/operants_manager.h"
 
 #include "gflags/gflags.h"
 #include "glog/logging.h"
@@ -23,8 +23,6 @@ DECLARE_string(tensor_operants_mode);
 
 namespace paddle {
-namespace operants {
 
 OperantsManager& OperantsManager::Instance() {
   static OperantsManager g_op_manager;
   return g_op_manager;
@@ -63,5 +61,4 @@ Tensor OperantsManager::multiply(const Tensor& x, const Tensor& y) {
   }
 }
 
-} // namespace operants
 } // namespace paddle
@@ -21,13 +21,13 @@ limitations under the License. */
 #include "glog/logging.h"
 
 #include "paddle/phi/api/include/context_pool.h"
+#include "paddle/phi/api/include/operants_manager.h"
 #include "paddle/phi/api/lib/utils/allocator.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_info.h"
 #include "paddle/phi/core/ddim.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/enforce.h"
-#include "paddle/phi/core/operants_manager.h"
 #include "paddle/phi/core/selected_rows.h"
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/sparse_csr_tensor.h"
@@ -435,7 +435,7 @@ void Tensor::reset_inplace_version(bool set_to_zero) {
 }
 
 PADDLE_API Tensor operator*(const Tensor &x, const Tensor &y) {
-  return paddle::operants::OperantsManager::Instance().multiply(x, y);
+  return paddle::OperantsManager::Instance().multiply(x, y);
 }
 
 } // namespace experimental
...
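For context (not part of the diff): operator* now resolves through paddle::OperantsManager::Instance().multiply. The body of OperantsManager::multiply is not shown in these hunks, but the setup code above (FLAGS_tensor_operants_mode set to "phi", "eager", or "static" before resetting the matching operants member) suggests a dispatch along the following lines; treat this as an assumption, not the actual implementation:

// Hypothetical sketch of the dispatch behind operator*.  The member names
// (eager_operants, static_operants, phi_operants) and the flag come from the
// diff; the branching logic itself is an assumption.
Tensor OperantsManager::multiply(const Tensor& x, const Tensor& y) {
  if (FLAGS_tensor_operants_mode == "eager") {
    return eager_operants->multiply(x, y);
  } else if (FLAGS_tensor_operants_mode == "static") {
    return static_operants->multiply(x, y);
  }
  // default / "phi" mode
  return phi_operants->multiply(x, y);
}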
@@ -114,11 +114,6 @@ cc_library(
   SRCS custom_kernel.cc
   DEPS kernel_factory)
 
-cc_library(
-  operants_manager
-  SRCS operants_manager.cc
-  DEPS phi_enforce)
-
 cc_library(
   mixed_vector
   SRCS mixed_vector.cc
...