Unverified commit d30d85da, authored by Chen Weihang and committed by GitHub

[Phi] Replace all prefix PT by PD and fix typo (#40046)

* replace prefix pt by pd

* replace added kernel

* revert util change

* pd kernel to phi

* resolve conflict

* resolve conflict
Parent commit: 7e076e7b
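
The bulk of this diff is a mechanical rename across the operator files: the misspelled DELCARE_INFER_SHAPE_FUNCTOR macro becomes DECLARE_INFER_SHAPE_FUNCTOR, and the PT_INFER_META wrapper that adapts a phi InferMeta function becomes PD_INFER_META. Condensed from the abs operator hunk below, the before/after pattern repeated throughout is:

// Before: old spelling and PT_ prefix.
DELCARE_INFER_SHAPE_FUNCTOR(abs, AbsInferShapeFunctor,
                            PT_INFER_META(phi::UnchangedInferMeta));

// After: fixed spelling and PD_ prefix.
DECLARE_INFER_SHAPE_FUNCTOR(abs, AbsInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

The declared functor (here AbsInferShapeFunctor) is then passed as an argument to the operator registration, as the gather_tree and empty hunks below show; only the macro names change, not the arguments at the call sites.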
......@@ -29,7 +29,7 @@ namespace framework {
phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
const std::string& op_type);
#define DELCARE_INFER_SHAPE_FUNCTOR(op_type, functor_name, fn) \
#define DECLARE_INFER_SHAPE_FUNCTOR(op_type, functor_name, fn) \
struct functor_name : public paddle::framework::InferShapeBase { \
void operator()( \
paddle::framework::InferShapeContext* ctx) const override { \
......
......@@ -110,9 +110,9 @@ void InferShapeUtilsTestKernel(
} // namespace framework
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(infer_shape_utils_test,
DECLARE_INFER_SHAPE_FUNCTOR(infer_shape_utils_test,
InferShapeUtilsTestInferShapeFunctor,
PT_INFER_META(paddle::framework::TestInferMeta));
PD_INFER_META(paddle::framework::TestInferMeta));
REGISTER_OPERATOR(infer_shape_utils_test,
paddle::framework::InferShapeUtilsTestOp,
paddle::framework::InferShapeUtilsTestOpMaker,
......
......@@ -141,8 +141,8 @@ class AbsDoubleGradOp : public framework::OperatorWithKernel {
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(abs, AbsInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(abs, AbsInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
namespace ops = paddle::operators;
......
......@@ -147,8 +147,8 @@ class AddMMOpGradMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(addmm, AddmmInferShapeFunctor,
PT_INFER_META(phi::AddmmInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(addmm, AddmmInferShapeFunctor,
PD_INFER_META(phi::AddmmInferMeta));
REGISTER_OPERATOR(addmm, ops::AddMMOp, ops::AddMMOpMaker,
ops::AddMMOpGradMaker<paddle::framework::OpDesc>,
ops::AddMMOpGradMaker<paddle::imperative::OpBase>,
......
......@@ -105,8 +105,8 @@ class Atan2OpVarTypeInference : public framework::VarTypeInference {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(atan2, Atan2InferShapeFunctor,
PT_INFER_META(phi::Atan2InferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(atan2, Atan2InferShapeFunctor,
PD_INFER_META(phi::Atan2InferMeta));
REGISTER_OPERATOR(atan2, ops::Atan2Op, ops::Atan2OpMaker,
ops::Atan2GradMaker<paddle::framework::OpDesc>,
ops::Atan2GradMaker<paddle::imperative::OpBase>,
......
......@@ -138,8 +138,8 @@ DECLARE_INPLACE_OP_INFERER(BCELossGradInplaceInferer,
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(bce_loss, BCELossInferShapeFunctor,
PT_INFER_META(phi::BCELossInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(bce_loss, BCELossInferShapeFunctor,
PD_INFER_META(phi::BCELossInferMeta));
REGISTER_OPERATOR(bce_loss, ops::BCELossOp, ops::BCELossOpMaker,
ops::BCELossGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -90,12 +90,12 @@ class BilinearTensorProductGradOpMaker
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(bilinear_tensor_product,
DECLARE_INFER_SHAPE_FUNCTOR(bilinear_tensor_product,
BilinearTensorProductInferShapeFunctor,
PT_INFER_META(phi::BilinearTensorProductInferMeta));
DELCARE_INFER_SHAPE_FUNCTOR(
PD_INFER_META(phi::BilinearTensorProductInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
bilinear_tensor_product_grad, BilinearTensorProductGradInferShapeFunctor,
PT_INFER_META(phi::BilinearTensorProductGradInferMeta));
PD_INFER_META(phi::BilinearTensorProductGradInferMeta));
REGISTER_OPERATOR(
bilinear_tensor_product, ops::BilinearTensorProductOp,
......
......@@ -167,9 +167,9 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(BroadcastTensorsGradNoNeedBufVarsInferer,
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DELCARE_INFER_SHAPE_FUNCTOR(broadcast_tensors,
DECLARE_INFER_SHAPE_FUNCTOR(broadcast_tensors,
BroadcastTensorsInferShapeFunctor,
PT_INFER_META(phi::BroadcastTensorsInferMeta));
PD_INFER_META(phi::BroadcastTensorsInferMeta));
REGISTER_OPERATOR(broadcast_tensors, ops::BroadcastTensorsOp,
ops::BroadcastTensorsOpMaker,
......
......@@ -90,8 +90,8 @@ class CholeskyGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(cholesky, CholeskyInferShapeFunctor,
PT_INFER_META(phi::CholeskyInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(cholesky, CholeskyInferShapeFunctor,
PD_INFER_META(phi::CholeskyInferMeta));
REGISTER_OPERATOR(cholesky, ops::CholeskyOp, ops::CholeskyOpMaker,
ops::CholeskyGradOpMaker<paddle::framework::OpDesc>,
ops::CholeskyGradOpMaker<paddle::imperative::OpBase>,
......
......@@ -205,8 +205,8 @@ class ConcatDoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(concat, ConcatInferShapeFunctor,
PT_INFER_META(phi::ConcatInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(concat, ConcatInferShapeFunctor,
PD_INFER_META(phi::ConcatInferMeta));
REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker,
ops::ConcatGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -66,8 +66,8 @@ class ConjGradMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(conj, ConjInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conj, ConjInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(conj, ops::ConjOp, ops::ConjOpMaker,
ops::ConjGradMaker<paddle::framework::OpDesc>,
ops::ConjGradMaker<paddle::imperative::OpBase>,
......
......@@ -58,8 +58,8 @@ class CompareReduceOp : public framework::OperatorWithKernel {
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
DELCARE_INFER_SHAPE_FUNCTOR(op_type, op_type##_InferShapeFunctor, \
PT_INFER_META(phi::CompareAllInferMeta)); \
DECLARE_INFER_SHAPE_FUNCTOR(op_type, op_type##_InferShapeFunctor, \
PD_INFER_META(phi::CompareAllInferMeta)); \
REGISTER_OPERATOR( \
op_type, ::paddle::operators::CompareReduceOp<_##op_type##Comment>, \
::paddle::operators::CompareReduceOpProtoMaker<_##op_type##Comment>, \
......
......@@ -96,8 +96,8 @@ class CompareOp : public framework::OperatorWithKernel {
}; \
char _##op_type##Comment::type[]{#op_type}; \
char _##op_type##Comment::equation[]{_equation}; \
DELCARE_INFER_SHAPE_FUNCTOR(op_type, op_type##_InferShapeFunctor, \
PT_INFER_META(phi::CompareInferMeta)); \
DECLARE_INFER_SHAPE_FUNCTOR(op_type, op_type##_InferShapeFunctor, \
PD_INFER_META(phi::CompareInferMeta)); \
REGISTER_OPERATOR( \
op_type, ::paddle::operators::CompareOp<_##op_type##Comment>, \
::paddle::operators::CompareOpProtoMaker<_##op_type##Comment>, \
......
......@@ -109,8 +109,8 @@ class CrossGradMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(cross, CrossInferShapeFunctor,
PT_INFER_META(phi::CrossInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(cross, CrossInferShapeFunctor,
PD_INFER_META(phi::CrossInferMeta));
REGISTER_OPERATOR(cross, ops::CrossOp, ops::CrossOpMaker,
ops::CrossGradMaker<paddle::framework::OpDesc>,
ops::CrossGradMaker<paddle::imperative::OpBase>,
......
......@@ -62,8 +62,8 @@ class DiagV2OpMaker : public framework::OpProtoAndCheckerMaker {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(diag_v2, DiagInferShapeFunctor,
PT_INFER_META(phi::DiagInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(diag_v2, DiagInferShapeFunctor,
PD_INFER_META(phi::DiagInferMeta));
REGISTER_OPERATOR(
diag_v2, ops::DiagV2Op, ops::DiagV2OpMaker,
......
......@@ -105,8 +105,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(DiagonalGradNoNeedBufferVarsInferer,
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(diagonal, DiagonalInferShapeFunctor,
PT_INFER_META(phi::DiagonalInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(diagonal, DiagonalInferShapeFunctor,
PD_INFER_META(phi::DiagonalInferMeta));
REGISTER_OPERATOR(diagonal, ops::DiagonalOp, ops::DiagonalOpMaker,
ops::DiagonalGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -124,8 +124,8 @@ class DistGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(dist, DistInferShapeFunctor,
PT_INFER_META(phi::DistInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(dist, DistInferShapeFunctor,
PD_INFER_META(phi::DistInferMeta));
REGISTER_OPERATOR(dist, ops::DistOp, ops::DistOpMaker,
ops::DistGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -101,8 +101,8 @@ class DotOpGradMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(dot, DotInferShapeFunctor,
PT_INFER_META(phi::DotInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(dot, DotInferShapeFunctor,
PD_INFER_META(phi::DotInferMeta));
REGISTER_OPERATOR(dot, ops::DotOp, ops::DotOpMaker,
ops::DotOpGradMaker<paddle::framework::OpDesc>,
......
......@@ -88,8 +88,8 @@ class EmptyOpVarTypeInference : public framework::VarTypeInference {
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DELCARE_INFER_SHAPE_FUNCTOR(empty, EmptyInferShapeFunctor,
PT_INFER_META(phi::CreateInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(empty, EmptyInferShapeFunctor,
PD_INFER_META(phi::CreateInferMeta));
REGISTER_OP_WITHOUT_GRADIENT(empty, ops::EmptyOp, ops::EmptyOpMaker,
ops::EmptyOpVarTypeInference,
EmptyInferShapeFunctor);
......@@ -73,8 +73,8 @@ DECLARE_INPLACE_OP_INFERER(ErfinvInplaceInferer, {"X", "Out"});
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(erfinv, ErfinvInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(erfinv, ErfinvInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(
erfinv, paddle::operators::ErfinvOp, paddle::operators::ErfinvOpMaker,
......
......@@ -67,8 +67,8 @@ Return an identity tensor whose shape is [num_rows, num_columns].
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(eye, EyeInferShapeFunctor,
PT_INFER_META(phi::EyeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(eye, EyeInferShapeFunctor,
PD_INFER_META(phi::EyeInferMeta));
REGISTER_OPERATOR(
eye, ops::EyeOp, ops::EyeOpMaker, ops::EyeOpVarTypeInference,
......
......@@ -130,11 +130,11 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(GatherNdGradNoNeedBufferVarInferer, "X");
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(gather_nd, GatherNdInferShapeFunctor,
PT_INFER_META(phi::GatherNdInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(gather_nd, GatherNdInferShapeFunctor,
PD_INFER_META(phi::GatherNdInferMeta));
DELCARE_INFER_SHAPE_FUNCTOR(gather_nd_grad, GatherNdGradInferShapeFunctor,
PT_INFER_META(phi::GatherNdGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(gather_nd_grad, GatherNdGradInferShapeFunctor,
PD_INFER_META(phi::GatherNdGradInferMeta));
REGISTER_OPERATOR(gather_nd, ops::GatherNdOp, ops::GatherNdOpMaker,
ops::GatherNdGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -61,8 +61,8 @@ selected ids.
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(gather_tree, GatherTreeInferShapeFunctor,
PT_INFER_META(phi::GatherTreeMeta));
DECLARE_INFER_SHAPE_FUNCTOR(gather_tree, GatherTreeInferShapeFunctor,
PD_INFER_META(phi::GatherTreeMeta));
REGISTER_OPERATOR(gather_tree, ops::GatherTreeOp, ops::GatherTreeOpMaker,
GatherTreeInferShapeFunctor);
......@@ -90,11 +90,11 @@ class GumbelSoftmaxGradOpMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(gumbel_softmax, GumbelSoftmaxInferShapeFunctor,
PT_INFER_META(phi::GumbelSoftmaxInferMeta));
DELCARE_INFER_SHAPE_FUNCTOR(gumbel_softmax_grad,
DECLARE_INFER_SHAPE_FUNCTOR(gumbel_softmax, GumbelSoftmaxInferShapeFunctor,
PD_INFER_META(phi::GumbelSoftmaxInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(gumbel_softmax_grad,
GumbelSoftmaxGradInferShapeFunctor,
PT_INFER_META(phi::GumbelSoftmaxGradInferMeta));
PD_INFER_META(phi::GumbelSoftmaxGradInferMeta));
REGISTER_OPERATOR(gumbel_softmax, ops::GumbelSoftmaxOp,
ops::GumbelSoftmaxOpMaker,
......
......@@ -112,8 +112,8 @@ class HuberLossGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(huber_loss, HuberLossInferShapeFunctor,
PT_INFER_META(phi::HuberLossInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(huber_loss, HuberLossInferShapeFunctor,
PD_INFER_META(phi::HuberLossInferMeta));
REGISTER_OPERATOR(huber_loss, ops::HuberLossOp, ops::HuberLossOpMaker<float>,
ops::HuberLossGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -82,8 +82,8 @@ DECLARE_INPLACE_OP_INFERER(ImagGradOpInplaceInferer,
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(imag, ImagInferShapeFunctor,
PT_INFER_META(phi::RealAndImagInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(imag, ImagInferShapeFunctor,
PD_INFER_META(phi::RealAndImagInferMeta));
namespace ops = paddle::operators;
......
......@@ -87,8 +87,8 @@ class IncrementGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(increment, IncrementInferShapeFunctor,
PT_INFER_META(phi::IncrementInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(increment, IncrementInferShapeFunctor,
PD_INFER_META(phi::IncrementInferMeta));
REGISTER_OPERATOR(increment, ops::IncrementOp, ops::IncrementOpMaker,
ops::IncrementGradOpMaker<paddle::framework::OpDesc>,
ops::IncrementGradOpMaker<paddle::imperative::OpBase>,
......
......@@ -100,8 +100,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(IndexSampleGradNoNeedBufferVarInferer, "X");
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(index_sample, IndexSampleInferShapeFunctor,
PT_INFER_META(phi::IndexSampleInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(index_sample, IndexSampleInferShapeFunctor,
PD_INFER_META(phi::IndexSampleInferMeta));
REGISTER_OPERATOR(index_sample, ops::IndexSampleOp, ops::IndexSampleOpMaker,
ops::IndexSampleGradMaker<paddle::framework::OpDesc>,
ops::IndexSampleGradMaker<paddle::imperative::OpBase>,
......
......@@ -85,8 +85,8 @@ DECLARE_INPLACE_OP_INFERER(LerpInplaceInferer, {"X", "Out"});
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(lerp, LerpInferShapeFunctor,
PT_INFER_META(phi::LerpInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(lerp, LerpInferShapeFunctor,
PD_INFER_META(phi::LerpInferMeta));
REGISTER_OPERATOR(
lerp, paddle::operators::LerpOp, paddle::operators::LerpOpMaker,
paddle::operators::LerpOpGradMaker<paddle::framework::OpDesc>,
......
......@@ -524,8 +524,8 @@ REGISTER_OPERATOR(matmul_v2, ops::MatMulV2Op, ops::MatMulV2OpMaker,
ops::MatMulV2GradOpMaker<paddle::framework::OpDesc>,
ops::MatMulV2GradOpMaker<paddle::imperative::OpBase>);
DELCARE_INFER_SHAPE_FUNCTOR(matmul_v2_grad, MatMulV2GradInferShapeFunctor,
PT_INFER_META(phi::GeneralBinaryGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(matmul_v2_grad, MatMulV2GradInferShapeFunctor,
PD_INFER_META(phi::GeneralBinaryGradInferMeta));
REGISTER_OPERATOR(matmul_v2_grad, ops::MatMulV2OpGrad,
ops::MatMulV2OpDoubleGradMaker<paddle::framework::OpDesc>,
ops::MatMulV2OpDoubleGradMaker<paddle::imperative::OpBase>,
......
......@@ -53,8 +53,8 @@ class MultinomialOp : public framework::OperatorWithKernel {
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DELCARE_INFER_SHAPE_FUNCTOR(multinomial, MultinomialInferShapeFunctor,
PT_INFER_META(phi::MultinomialInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(multinomial, MultinomialInferShapeFunctor,
PD_INFER_META(phi::MultinomialInferMeta));
REGISTER_OPERATOR(
multinomial, ops::MultinomialOp, ops::MultinomialOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -94,8 +94,8 @@ class MVOpGrad : public framework::OperatorWithKernel {
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DELCARE_INFER_SHAPE_FUNCTOR(mv, MvInferShapeFunctor,
PT_INFER_META(phi::MvInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(mv, MvInferShapeFunctor,
PD_INFER_META(phi::MvInferMeta));
REGISTER_OPERATOR(mv, ops::MVOp, ops::MVOpMaker,
ops::MVOpGradMaker<paddle::framework::OpDesc>,
......
......@@ -124,8 +124,8 @@ class PixelShuffleGradOp : public framework::OperatorWithKernel {
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(pixel_shuffle, PixelShuffleInferShapeFunctor,
PT_INFER_META(phi::PixelShuffleInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pixel_shuffle, PixelShuffleInferShapeFunctor,
PD_INFER_META(phi::PixelShuffleInferMeta));
REGISTER_OPERATOR(pixel_shuffle, ops::PixelShuffleOp, ops::PixelShuffleOpMaker,
ops::PixelShuffleGradMaker<paddle::framework::OpDesc>,
......
......@@ -87,8 +87,8 @@ class PoissonGradOpMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DELCARE_INFER_SHAPE_FUNCTOR(poisson, PoissonInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(poisson, PoissonInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(poisson, ops::PoissonOp, ops::PoissonOpMaker,
ops::PoissonOpInferVarType,
......
......@@ -82,8 +82,8 @@ DECLARE_INPLACE_OP_INFERER(RealGradOpInplaceInferer,
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(real, RealInferShapeFunctor,
PT_INFER_META(phi::RealAndImagInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(real, RealInferShapeFunctor,
PD_INFER_META(phi::RealAndImagInferMeta));
namespace ops = paddle::operators;
......
......@@ -96,8 +96,8 @@ class __reduce_meanMaker__ : public ops::ReduceOpMaker {
virtual std::string GetOpType() const { return "Reduce reduce_mean"; }
};
DELCARE_INFER_SHAPE_FUNCTOR(reduce_mean, ReduceMeanInferShapeFunctor,
PT_INFER_META(phi::MeanRawInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(reduce_mean, ReduceMeanInferShapeFunctor,
PD_INFER_META(phi::MeanRawInferMeta));
REGISTER_OPERATOR(reduce_mean, ops::ReduceOp, __reduce_meanMaker__,
ops::ReduceMeanOpGradMaker<paddle::framework::OpDesc>,
......
......@@ -102,8 +102,8 @@ class ReduceSumOpMaker : public ops::ReduceOpMaker {
virtual std::string GetOpType() const { return "Reduce reduce_sum"; }
};
DELCARE_INFER_SHAPE_FUNCTOR(reduce_sum, ReduceSumInferShapeFunctor,
PT_INFER_META(phi::ReduceInferMetaBase));
DECLARE_INFER_SHAPE_FUNCTOR(reduce_sum, ReduceSumInferShapeFunctor,
PD_INFER_META(phi::ReduceInferMetaBase));
REGISTER_OPERATOR(reduce_sum, ops::ReduceOp, ReduceSumOpMaker,
ops::ReduceSumVarTypeInference,
......
......@@ -121,8 +121,8 @@ DECLARE_INPLACE_OP_INFERER(ScaleOpInplaceInferer, {"X", "Out"});
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(scale, ScaleInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(scale, ScaleInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker,
ops::ScaleGradMaker<paddle::framework::OpDesc>,
ops::ScaleGradMaker<paddle::imperative::OpBase>,
......
......@@ -119,12 +119,12 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(ScatterNdAddGradNoNeedBufferVarsInferer,
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(scatter_nd_add, ScatterNdAddInferShapeFunctor,
PT_INFER_META(phi::ScatterNdAddInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(scatter_nd_add, ScatterNdAddInferShapeFunctor,
PD_INFER_META(phi::ScatterNdAddInferMeta));
DELCARE_INFER_SHAPE_FUNCTOR(scatter_nd_add_grad,
DECLARE_INFER_SHAPE_FUNCTOR(scatter_nd_add_grad,
ScatterNdAddGradInferShapeFunctor,
PT_INFER_META(phi::ScatterNdAddGradInferMeta));
PD_INFER_META(phi::ScatterNdAddGradInferMeta));
REGISTER_OPERATOR(scatter_nd_add, ops::ScatterNdAddOp, ops::ScatterNdAddOpMaker,
ops::ScatterNdAddGradMaker<paddle::framework::OpDesc>,
......
......@@ -103,11 +103,11 @@ DECLARE_INPLACE_OP_INFERER(ScatterInplaceInferer, {"X", "Out"});
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(scatter, ScatterInferShapeFunctor,
PT_INFER_META(phi::ScatterInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(scatter, ScatterInferShapeFunctor,
PD_INFER_META(phi::ScatterInferMeta));
DELCARE_INFER_SHAPE_FUNCTOR(scatter_grad, ScatterGradInferShapeFunctor,
PT_INFER_META(phi::ScatterGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(scatter_grad, ScatterGradInferShapeFunctor,
PD_INFER_META(phi::ScatterGradInferMeta));
namespace ops = paddle::operators;
REGISTER_OPERATOR(scatter, ops::ScatterOp, ops::ScatterOpMaker,
......
......@@ -120,8 +120,8 @@ class SeluGradOp : public framework::OperatorWithKernel {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(selu, SeluInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(selu, SeluInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(selu, ops::SeluOp, ops::SeluOpMaker, ops::SeluOpInferVarType,
ops::SeluGradMaker<paddle::framework::OpDesc>,
......
......@@ -60,8 +60,8 @@ class SignGradMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(sign, SignInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(sign, SignInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(sign, ops::SignOp, ops::SignOpMaker<float>,
ops::SignGradMaker<paddle::framework::OpDesc>,
ops::SignGradMaker<paddle::imperative::OpBase>,
......
......@@ -44,8 +44,8 @@ Return the number of elements in the input.
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(size, SizeInferShapeFunctor,
PT_INFER_META(phi::SizeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(size, SizeInferShapeFunctor,
PD_INFER_META(phi::SizeInferMeta));
REGISTER_OPERATOR(
size, ops::SizeOp, ops::SizeOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
......
......@@ -107,8 +107,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(TraceGradNoNeedBufferVarsInferer, "Input");
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(trace, TraceInferShapeFunctor,
PT_INFER_META(phi::TraceInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(trace, TraceInferShapeFunctor,
PD_INFER_META(phi::TraceInferMeta));
REGISTER_OPERATOR(trace, ops::TraceOp, ops::TraceOpMaker,
ops::TraceGradOpMaker<paddle::framework::OpDesc>,
ops::TraceGradOpMaker<paddle::imperative::OpBase>,
......
......@@ -120,8 +120,8 @@ class TriangularSolveOpGradMaker : public framework::SingleGradOpMaker<T> {
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(triangular_solve, TriangularSolveInferShapeFunctor,
PT_INFER_META(phi::TriangularSolveInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(triangular_solve, TriangularSolveInferShapeFunctor,
PD_INFER_META(phi::TriangularSolveInferMeta));
REGISTER_OPERATOR(triangular_solve, ops::TriangularSolveOp,
ops::TriangularSolveOpMaker,
......
......@@ -69,8 +69,8 @@ class TruncGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace operators
} // namespace paddle
DELCARE_INFER_SHAPE_FUNCTOR(trunc, TruncInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(trunc, TruncInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
namespace ops = paddle::operators;
REGISTER_OPERATOR(trunc, ops::TruncOp, ops::TruncOpMaker,
......
......@@ -119,8 +119,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(UnfoldGradOpNoNeedBufferVarsInferer, "X");
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(unfold, UnfoldInferShapeFunctor,
PT_INFER_META(phi::UnfoldInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(unfold, UnfoldInferShapeFunctor,
PD_INFER_META(phi::UnfoldInferMeta));
REGISTER_OPERATOR(unfold, ops::UnfoldOp, ops::UnfoldOpMaker,
ops::UnfoldGradMaker<paddle::framework::OpDesc>,
ops::UnfoldGradMaker<paddle::imperative::OpBase>,
......
......@@ -117,8 +117,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(WhereGradNoNeedBufferVarsInferer, "X", "Y");
} // namespace paddle
namespace ops = paddle::operators;
DELCARE_INFER_SHAPE_FUNCTOR(where, WhereInferShapeFunctor,
PT_INFER_META(phi::WhereInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(where, WhereInferShapeFunctor,
PD_INFER_META(phi::WhereInferMeta));
REGISTER_OPERATOR(where, ops::WhereOp, ops::WhereOpMaker,
ops::WhereOpGradMaker<paddle::framework::OpDesc>,
ops::WhereOpGradMaker<paddle::imperative::OpBase>,
......
......@@ -82,7 +82,7 @@ inline size_t SizeOf(DataType data_type) {
return 0;
}
#define PT_FOR_EACH_DATA_TYPE(_) \
#define PD_FOR_EACH_DATA_TYPE(_) \
_(bool, DataType::BOOL) \
_(int8_t, DataType::INT8) \
_(uint8_t, DataType::UINT8) \
......@@ -105,25 +105,25 @@ struct DataTypeToCppType;
template <typename T>
struct CppTypeToDataType;
#define PT_SPECIALIZE_DataTypeToCppType(cpp_type, data_type) \
#define PD_SPECIALIZE_DataTypeToCppType(cpp_type, data_type) \
template <> \
struct DataTypeToCppType<data_type> { \
using type = cpp_type; \
};
PT_FOR_EACH_DATA_TYPE(PT_SPECIALIZE_DataTypeToCppType)
PD_FOR_EACH_DATA_TYPE(PD_SPECIALIZE_DataTypeToCppType)
#undef PT_SPECIALIZE_DataTypeToCppType
#undef PD_SPECIALIZE_DataTypeToCppType
#define PT_SPECIALIZE_CppTypeToDataType(cpp_type, data_type) \
#define PD_SPECIALIZE_CppTypeToDataType(cpp_type, data_type) \
template <> \
struct CppTypeToDataType<cpp_type> { \
constexpr static DataType Type() { return data_type; } \
};
PT_FOR_EACH_DATA_TYPE(PT_SPECIALIZE_CppTypeToDataType)
PD_FOR_EACH_DATA_TYPE(PD_SPECIALIZE_CppTypeToDataType)
#undef PT_SPECIALIZE_CppTypeToDataType
#undef PD_SPECIALIZE_CppTypeToDataType
inline std::ostream& operator<<(std::ostream& os, DataType dtype) {
switch (dtype) {
......
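
The data_type.h hunk above renames an X-macro list (PD_FOR_EACH_DATA_TYPE) together with the two specialization macros it drives. As a self-contained sketch of that X-macro technique, using hypothetical names rather than Paddle's, the pattern works like this:

#include <cstdint>
#include <type_traits>

enum class DataType { BOOL, INT8, INT32 };

// X-macro: one central list of (cpp_type, enum_value) pairs.
#define FOR_EACH_DATA_TYPE(_) \
  _(bool, DataType::BOOL)     \
  _(int8_t, DataType::INT8)   \
  _(int32_t, DataType::INT32)

template <DataType T>
struct DataTypeToCppType;

// Each expansion of the list stamps out one template specialization.
#define SPECIALIZE_DataTypeToCppType(cpp_type, data_type) \
  template <>                                             \
  struct DataTypeToCppType<data_type> {                   \
    using type = cpp_type;                                \
  };

FOR_EACH_DATA_TYPE(SPECIALIZE_DataTypeToCppType)
#undef SPECIALIZE_DataTypeToCppType

static_assert(
    std::is_same<DataTypeToCppType<DataType::INT8>::type, int8_t>::value,
    "INT8 maps to int8_t");

int main() { return 0; }

Keeping the type list in a single macro means adding a new DataType takes one new line; every table derived from it (such as the enum-to-C++-type mapping above) regenerates automatically.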
......@@ -166,7 +166,7 @@ struct ArgumentMappingFnRegistrar {
};
#define PD_REGISTER_BASE_KERNEL_NAME(op_type, base_kernel_name) \
PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_REGISTER_base_kernel_name_ns_check_##op_type, \
"PD_REGISTER_BASE_KERNEL_NAME must be called in global namespace."); \
static const ::phi::BaseKernelNameRegistrar \
......@@ -174,7 +174,7 @@ struct ArgumentMappingFnRegistrar {
int TouchBaseKernelNameSymbol_##op_type() { return 0; }
#define PD_DECLARE_BASE_KERNEL_NAME(op_type) \
PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_DECLARE_ai_name_ns_check_##op_type, \
"PD_DECLARE_BASE_KERNEL_NAME must be called in global namespace."); \
extern int TouchBaseKernelNameSymbol_##op_type(); \
......@@ -182,7 +182,7 @@ struct ArgumentMappingFnRegistrar {
TouchBaseKernelNameSymbol_##op_type()
#define PD_REGISTER_ARG_MAPPING_FN(op_type, arg_mapping_fn) \
PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_REGISTER_arg_map_fn_ns_check_##op_type, \
"PD_REGISTER_ARG_MAPPING_FN must be called in global namespace."); \
static const ::phi::ArgumentMappingFnRegistrar \
......@@ -190,7 +190,7 @@ struct ArgumentMappingFnRegistrar {
int TouchArgumentMappingFnSymbol_##op_type() { return 0; }
#define PD_DECLARE_ARG_MAPPING_FN(op_type) \
PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_DECLARE_arg_map_fn_ns_check_##op_type, \
"PD_DECLARE_ARG_MAPPING_FN must be called in global namespace."); \
extern int TouchArgumentMappingFnSymbol_##op_type(); \
......
......@@ -86,10 +86,10 @@ class InferMetaContext {
paddle::SmallVector<std::pair<int, int>> output_range_;
};
#define PT_INFER_META(...) \
#define PD_INFER_META(...) \
::phi::InferMetaFnImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Call
#define PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(attr_type) \
#define PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(attr_type) \
template <typename... Tail> \
struct InferMetaFnCallHelper<attr_type, Tail...> { \
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs> \
......@@ -175,24 +175,24 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
};
// TODO(chenweihang): support other attr type later
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(bool);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(int);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(int64_t);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(float);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::string&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<bool>&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<int>&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(bool);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(int);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(int64_t);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(float);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::string&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<bool>&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<int>&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(
const std::vector<int64_t>&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<float>&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<double>&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<float>&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const std::vector<double>&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(
const std::vector<std::string>&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(DataType);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(Backend);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(DataLayout);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const Scalar&);
PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const ScalarArray&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(DataType);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(Backend);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(DataLayout);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const Scalar&);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(const ScalarArray&);
// TODO(chenweihang): support vector<MetaTensor> input later
......@@ -304,11 +304,11 @@ struct InferMetaFnRegistrar {
};
#define PD_REGISTER_INFER_META_FN(kernel_name_prefix, variadic_infer_meta_fn) \
PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_STATIC_ASSERT_GLOBAL_NAMESPACE( \
PD_REGISTER_infer_meta_fn_ns_check_##kernel_name_prefix, \
"PD_REGISTER_INFER_META_FN must be called in global namespace."); \
static const ::phi::InferMetaFnRegistrar \
__registrar_arg_map_fn_for_##kernel_name_prefix( \
#kernel_name_prefix, PT_INFER_META(variadic_infer_meta_fn))
#kernel_name_prefix, PD_INFER_META(variadic_infer_meta_fn))
} // namespace phi
This diff has been collapsed.
......@@ -30,14 +30,15 @@
namespace phi {
#define PT_KERNEL(...) \
// PD_KERNEL has been used by custom op api
#define PHI_KERNEL(...) \
::phi::KernelImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Compute
#define PT_VARIADIC_KERNEL(...) \
#define PHI_VARIADIC_KERNEL(...) \
reinterpret_cast<void*>(&::phi::KernelImpl<decltype(&__VA_ARGS__), \
&__VA_ARGS__>::VariadicCompute)
#define PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(dev_ctx) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(dev_ctx) \
template <typename... Tail> \
struct KernelCallHelper<const dev_ctx&, Tail...> { \
template <int dev_ctx_idx, \
......@@ -60,7 +61,7 @@ namespace phi {
} \
}
#define PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(tensor_type) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper<const tensor_type&, Tail...> { \
template <int dev_ctx_idx, \
......@@ -81,7 +82,7 @@ namespace phi {
} \
}
#define PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(tensor_type) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper<paddle::optional<const tensor_type&>, Tail...> { \
template <int dev_ctx_idx, \
......@@ -102,7 +103,7 @@ namespace phi {
} \
}
#define PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(tensor_type) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper<const std::vector<const tensor_type*>&, Tail...> { \
template <int dev_ctx_idx, \
......@@ -124,7 +125,7 @@ namespace phi {
} \
}
#define PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(attr_type) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(attr_type) \
template <typename... Tail> \
struct KernelCallHelper<attr_type, Tail...> { \
template <int dev_ctx_idx, \
......@@ -142,7 +143,7 @@ namespace phi {
} \
}
#define PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(tensor_type) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper<tensor_type*, Tail...> { \
template <int dev_ctx_idx, \
......@@ -159,7 +160,7 @@ namespace phi {
} \
}
#define PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(tensor_type) \
#define PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper<std::vector<tensor_type*>, Tail...> { \
template <int dev_ctx_idx, \
......@@ -204,65 +205,65 @@ struct KernelImpl<Return (*)(DevCtx, Args...), kernel_fn> {
/* DeviceContext Helpers */
PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CPUContext);
PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CPUContext);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(GPUContext);
PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(GPUContext);
#endif
#ifdef PADDLE_WITH_XPU
PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(XPUContext);
PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(XPUContext);
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CustomContext);
PD_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CustomContext);
#endif
/* Input Helpers */
PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(DenseTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(DenseTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SelectedRows);
PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(DenseTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(SelectedRows);
PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(DenseTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(DenseTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SelectedRows);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(DenseTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(SelectedRows);
PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCooTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCooTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(SparseCooTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCooTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCooTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(SparseCooTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCsrTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCsrTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(SparseCsrTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCsrTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCsrTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(SparseCsrTensor);
/* Attribute Helpers */
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(bool);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(float);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(double);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int64_t);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(phi::dtype::float16);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const Scalar&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataType);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataLayout);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(Place);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int64_t>&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const ScalarArray&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int>&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::string&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<bool>&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<float>&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<double>&);
PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<std::string>&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(bool);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(float);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(double);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int64_t);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(phi::dtype::float16);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const Scalar&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataType);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataLayout);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(Place);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int64_t>&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const ScalarArray&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<int>&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::string&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<bool>&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<float>&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<double>&);
PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const std::vector<std::string>&);
/* Output Helpers */
PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(DenseTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(DenseTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SelectedRows);
PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(DenseTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(DenseTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SelectedRows);
PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCooTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCooTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCooTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCooTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCsrTensor);
PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCsrTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCsrTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCsrTensor);
/* End case */
template <typename T>
......
......@@ -26,19 +26,19 @@ namespace phi {
classname& operator=(classname&&) = delete
#endif
#define PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \
_PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg)
#define PD_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \
_PD_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg)
#define _PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \
#define _PD_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \
struct __test_global_namespace_##uniq_name##__ {}; \
static_assert(std::is_same<::__test_global_namespace_##uniq_name##__, \
__test_global_namespace_##uniq_name##__>::value, \
msg)
#ifdef __COUNTER__
#define PT_ID __COUNTER__
#define PD_ID __COUNTER__
#else
#define PT_ID __LINE__
#define PD_ID __LINE__
#endif
#if defined(_WIN32)
......@@ -48,9 +48,9 @@ namespace phi {
#define UNUSED __attribute__((unused))
#endif
#define PT_CONCATENATE(arg1, arg2) PT_CONCATENATE1(arg1, arg2)
#define PT_CONCATENATE1(arg1, arg2) PT_CONCATENATE2(arg1, arg2)
#define PT_CONCATENATE2(arg1, arg2) arg1##arg2
#define PT_EXPAND(x) x
#define PD_CONCATENATE(arg1, arg2) PD_CONCATENATE1(arg1, arg2)
#define PD_CONCATENATE1(arg1, arg2) PD_CONCATENATE2(arg1, arg2)
#define PD_CONCATENATE2(arg1, arg2) arg1##arg2
#define PD_EXPAND(x) x
} // namespace phi
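
The final hunk renames the token-pasting helpers (PT_CONCATENATE, PT_ID, PT_EXPAND become PD_CONCATENATE, PD_ID, PD_EXPAND) alongside the global-namespace assert. As a self-contained sketch of how helpers like these are commonly combined to mint unique identifiers inside registration macros (illustrative names only, not Paddle's exact macros):

#include <cstdio>

// Two-level concatenation so that arguments such as __COUNTER__ are
// fully expanded before the tokens are pasted together.
#define MY_CONCATENATE(arg1, arg2) MY_CONCATENATE1(arg1, arg2)
#define MY_CONCATENATE1(arg1, arg2) MY_CONCATENATE2(arg1, arg2)
#define MY_CONCATENATE2(arg1, arg2) arg1##arg2

#ifdef __COUNTER__
#define MY_ID __COUNTER__
#else
#define MY_ID __LINE__
#endif

static void RegisterThing(const char* name) {
  std::printf("registered %s\n", name);
}

// Each expansion defines a distinctly named static registrar variable,
// so the macro can appear any number of times in one translation unit.
#define REGISTER_THING(name)                                  \
  static int MY_CONCATENATE(registrar_for_##name##_, MY_ID) = \
      (RegisterThing(#name), 0)

REGISTER_THING(alpha);
REGISTER_THING(beta);

int main() { return 0; }

The indirection matters: pasting in a single step would glue the unexpanded argument token into the name, so every expansion would produce the same identifier and the second registration would fail to compile.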