Unverified commit 138bdf40, authored by cyberslack_lee, committed by GitHub

[clang-tidy] No.26,27 enable misc-unused-using-decls,misc-unused-alias-decls (#56485)

* fix

* fix
Parent cc9e8699
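The two checks enabled here, misc-unused-using-decls and misc-unused-alias-decls, report `using` declarations and namespace alias declarations that are never referenced in the translation unit. A minimal sketch of the kind of code they flag (the names below are illustrative only, not taken from this PR):

#include <chrono>
#include <vector>

namespace chr = std::chrono;  // misc-unused-alias-decls: alias 'chr' is never referenced
using std::vector;            // misc-unused-using-decls: 'vector' is never referenced

int main() { return 0; }

Invoking clang-tidy with `-checks=-*,misc-unused-using-decls,misc-unused-alias-decls` on such a file produces one warning per unused declaration, and running it with `--fix` deletes the flagged lines, which is essentially what the hunks below do across the Paddle sources.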
@@ -163,8 +163,8 @@ cppcoreguidelines-c-copy-assignment-signature,
 -cppcoreguidelines-slicing,
 -hicpp-avoid-goto,
 -hicpp-exception-baseclass,
--misc-unused-alias-decls,
--misc-unused-using-decls,
+misc-unused-alias-decls,
+misc-unused-using-decls,
 modernize-avoid-bind,
 modernize-avoid-c-arrays,
 -modernize-deprecated-headers,
...
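In the `Checks` list of a .clang-tidy file, a leading `-` excludes the named check, so dropping the `-` from the two misc-* entries above is what actually switches the checks on for the whole repository.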
@@ -18,8 +18,6 @@ namespace paddle {
 namespace distributed {
 namespace auto_parallel {
-using phi::distributed::auto_parallel::str_join;
 std::pair<std::vector<TensorDistAttr>, std::vector<TensorDistAttr>>
 ReplicatedSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
                                  const paddle::framework::AttributeMap& attrs) {
...
@@ -24,9 +24,6 @@ namespace paddle {
 namespace framework {
 namespace ir {
-using string::PrettyLog;
-using string::Style;
 size_t PDPattern::id_ = 0UL;
 #ifdef PADDLE_WITH_TENSORRT
...
@@ -27,7 +27,6 @@ namespace framework {
 namespace ir {
 using paddle::operators::OpAndGradOpPair;
-using paddle::operators::OpVariant;
 using paddle::operators::OpVariantSet;
 void RecurrentOpEagerDeletionPass::ApplyImpl(Graph *graph) const {
...
@@ -33,7 +33,6 @@
 namespace paddle {
 namespace inference {
 namespace analysis {
-using string::PrettyLog;
 using string::PrettyLogEndl;
 using string::Style;
...
@@ -154,7 +154,6 @@ void UpdatePrivateDeviceContext(InferGPUContext *gpu_context,
 #endif
 }  // namespace
-using inference::Singleton;
 #ifdef PADDLE_WITH_TENSORRT
 using inference::tensorrt::TRTCalibratorEngine;
 using inference::tensorrt::TRTCalibratorEngineManager;
@@ -2527,6 +2526,7 @@ void AnalysisPredictor::ClearIntermediateTensor() {
 }
 #ifdef PADDLE_WITH_TENSORRT
+using inference::Singleton;
 bool AnalysisPredictor::SaveTrtCalibToDisk() {
   PADDLE_ENFORCE_EQ(config_.tensorrt_engine_enabled(),
                     true,
...
@@ -37,7 +37,6 @@ namespace paddle {
 using framework::Variable;
 using framework::ir::Graph;
-using phi::CPUPlace;
 using ConstEigenVectorArrayMap =
     Eigen::Map<const Eigen::Array<float, Eigen::Dynamic, 1>>;
 using EigenMatrixDoubleArray =
...
@@ -338,7 +338,6 @@ DEFINE_ACTIVATION_CPU_KERNEL(SoftRelu, SoftReluFunctor, SoftReluGradFunctor)
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 #define REGISTER_ACTIVATION_OP(KERNEL_TYPE, OP_NAME, functor, grad_functor) \
   REGISTER_OPERATOR(                                                        \
...
@@ -111,7 +111,6 @@ class AddPositionEncodingGradOpMaker : public framework::SingleGradOpMaker<T> {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plt = paddle::platform;
 REGISTER_OPERATOR(
     add_position_encoding,
...
@@ -75,7 +75,6 @@ specially counter orderingly.
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OP_WITHOUT_GRADIENT(assign_pos,
                              ops::AssignPosOp,
...
@@ -145,7 +145,6 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(CenterLossGradNoNeedBufVarsInferer, "X");
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OPERATOR(center_loss,
                   ops::CenterLossOp,
...
@@ -15,7 +15,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/flatten_op.h"
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OP_CUDA_KERNEL(flatten,
                         ops::FlattenKernel<phi::GPUContext, float>,
...
@@ -281,7 +281,6 @@ class FusedSeqpoolCVMGradOpMaker : public framework::SingleGradOpMaker<T> {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OPERATOR(fused_seqpool_cvm,
                   ops::FusedSeqpoolCVMOp,
...
@@ -81,7 +81,6 @@ class LimitByCapacityOpMaker : public framework::OpProtoAndCheckerMaker {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OP_WITHOUT_GRADIENT(limit_by_capacity,
                              ops::LimitByCapacityOp,
...
@@ -106,7 +106,6 @@ raise error if the type is not listed above.
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(memcpy_d2h,
                             MemcpyD2HInferShapeFunctor,
...
@@ -108,7 +108,6 @@ raise error if the type is not listed above.
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(memcpy_h2d,
                             MemcpyH2DInferShapeFunctor,
...
@@ -127,7 +127,6 @@ raise error if the type is not listed above.
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(memcpy,
                             MemcpyInferShapeFunctor,
...
@@ -19,11 +19,7 @@ namespace paddle {
 namespace operators {
 using dnnl::memory;
-using dnnl::primitive;
-using dnnl::reorder;
 using dnnl::resampling_forward;
-using dnnl::stream;
-using phi::DataLayout;
 using OneDNNMemoryFormat = dnnl::memory::format_tag;
 template <typename T = float>
...
@@ -23,10 +23,6 @@ namespace paddle {
 namespace operators {
 using dnnl::memory;
-using dnnl::primitive;
-using dnnl::reorder;
-using dnnl::stream;
-using phi::DataLayout;
 template <typename T>
 class QuantOpKernel : public framework::OpKernel<T> {
...
@@ -23,7 +23,6 @@ namespace paddle {
 namespace operators {
 using dnnl::memory;
-using dnnl::reorder;
 namespace {
...
@@ -56,7 +56,6 @@ class NumberCountOpMaker : public framework::OpProtoAndCheckerMaker {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OP_WITHOUT_GRADIENT(number_count,
                              ops::NumberCountOp,
...
@@ -89,7 +89,7 @@ $$
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(adadelta,
                             AdadeltaInferMetaFunctor,
                             PD_INFER_META(phi::AdadeltaInferMeta));
...
@@ -112,7 +112,6 @@ class DistributedFusedLambInitOpMaker
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OP_WITHOUT_GRADIENT(distributed_fused_lamb_init,
                              ops::DistributedFusedLambInitOp,
...
@@ -165,7 +165,6 @@ class DistributedFusedLambOpMaker : public framework::OpProtoAndCheckerMaker {
 }  // namespace operators
 }  // namespace paddle
-namespace plat = paddle::platform;
 namespace ops = paddle::operators;
 REGISTER_OP_WITHOUT_GRADIENT(distributed_fused_lamb,
                              ops::DistributedFusedLambOp,
...
@@ -65,7 +65,6 @@ class PSendArrayOpMaker : public framework::OpProtoAndCheckerMaker {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(p_send,
                             PSendInferShapeFunctor,
...
@@ -34,7 +34,6 @@ class OpBase;
 namespace paddle {
 namespace operators {
-using framework::GradVarName;
 #define CLOG std::cout
...
@@ -575,8 +575,7 @@ class CPUPyramidHashOPGradKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plt = paddle::platform;
-namespace frm = paddle::framework;
 REGISTER_OPERATOR(pyramid_hash,
                   ops::PyramidHashOP,
                   ops::PyramidHashOpMaker,
...
@@ -88,7 +88,6 @@ class RandomCropOpMaker : public framework::OpProtoAndCheckerMaker {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace f = paddle::framework;
 REGISTER_OPERATOR(
     random_crop,
     ops::RandomCropOp,
...
@@ -88,7 +88,6 @@ DECLARE_INPLACE_OP_INFERER(RandomRoutingInplaceInferer, {"TopK_Idx", "Out"});
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 REGISTER_OPERATOR(
     random_routing,
...
@@ -236,7 +236,6 @@ DECLARE_INPLACE_OP_INFERER(SetValueOpInplaceInferer, {"Input", "Out"});
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(set_value,
                             SetValueInferShapeFunctor,
...
@@ -29,15 +29,8 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
-using std::bad_cast;
-using std::codecvt_utf8;
-using std::endl;
-using std::exception;
 using std::ifstream;
 using std::int64_t;
-using std::min;
-using std::runtime_error;
-using std::shared_ptr;
 using std::size_t;
 using std::string;
 using std::unordered_map;
...
@@ -243,8 +243,6 @@ DECLARE_INPLACE_OP_INFERER(SumInplaceInferer, {"X", "Out"});
 }  // namespace operators
 }  // namespace paddle
-namespace ops = paddle::operators;
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(sum,
                             AddNInferShapeFunctor,
...
@@ -118,7 +118,6 @@ class TransferLayoutOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plat = paddle::platform;
 DECLARE_INFER_SHAPE_FUNCTOR(transfer_layout,
                             TransferLayoutInferShapeFunctor,
                             PD_INFER_META(phi::TransferLayoutInferMeta));
...
@@ -523,8 +523,7 @@ class CPUVarConv2dOPGradKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 namespace ops = paddle::operators;
-namespace plt = paddle::platform;
-namespace frm = paddle::framework;
 REGISTER_OPERATOR(var_conv_2d,
                   ops::VarConv2dOP,
                   ops::VarConv2dOpMaker,
...
@@ -39,7 +39,6 @@ limitations under the License. */
 #include "paddle/fluid/pybind/data_set_py.h"
 namespace py = pybind11;
-namespace pd = paddle::framework;
 namespace paddle {
 namespace pybind {
...
@@ -54,8 +54,6 @@ limitations under the License. */
 #include "paddle/phi/kernels/sync_batch_norm_kernel.h"
-namespace py = pybind11;
 namespace paddle {
 namespace pybind {
...
@@ -43,8 +43,6 @@ limitations under the License. */
 namespace paddle {
 namespace pybind {
-namespace py = ::pybind11;
 PyTypeObject* p_pylayer_type;
 extern PyTypeObject* p_tensor_type;
...
@@ -29,8 +29,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/fleet/heter_wrapper.h"
 #include "paddle/fluid/pybind/heter_wrapper_py.h"
-namespace py = pybind11;
 namespace paddle {
 namespace pybind {
 #if defined(PADDLE_WITH_PSLIB) && !defined(PADDLE_WITH_HETERPS)
...
@@ -30,8 +30,6 @@ limitations under the License. */
 #include "paddle/fluid/pybind/metrics_py.h"
-namespace py = pybind11;
 #if defined(PADDLE_WITH_PSLIB)
 namespace paddle {
 namespace pybind {
...
@@ -37,7 +37,6 @@ limitations under the License. */
 #include "paddle/fluid/pybind/nccl_wrapper_py.h"
 namespace py = pybind11;
-namespace pd = paddle::framework;
 namespace paddle {
 namespace pybind {
...
@@ -35,7 +35,6 @@
 #include "paddle/fluid/pybind/imperative.h"
 #include "paddle/phi/common/complex.h"
-namespace py = pybind11;
 namespace paddle {
 namespace pybind {
...
@@ -44,9 +44,6 @@ PyTypeObject *g_blockdesc_pytype = nullptr;
 namespace pd = paddle::framework;
 namespace jit = paddle::jit;
-using paddle::distributed::TensorDistAttr;
-using paddle::distributed::auto_parallel::OperatorDistAttr;
 template <typename T>
 static pybind11::bytes SerializeMessage(
     T &self) {  // NOLINT due to pybind11 convention.
...
@@ -29,8 +29,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
 #include "paddle/fluid/pybind/ps_gpu_wrapper_py.h"
-namespace py = pybind11;
 namespace paddle {
 namespace pybind {
 #ifdef PADDLE_WITH_HETERPS
...
@@ -18,12 +18,7 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
-using dnnl::engine;
-using dnnl::inner_product_forward;
 using dnnl::memory;
-using dnnl::prop_kind;
-using dnnl::stream;
-using phi::ReshapeToMatrix;
 namespace phi {
 namespace fusion {
...
@@ -23,7 +23,6 @@ using dnnl::engine;
 using dnnl::inner_product_forward;
 using dnnl::memory;
 using dnnl::prop_kind;
-using dnnl::stream;
 using phi::ReshapeToMatrix;
 namespace phi {
...