From fe78a92e6ea57a201f320c572d7f2ad6a0ff968a Mon Sep 17 00:00:00 2001
From: luotao1 <luotao02@baidu.com>
Date: Tue, 12 Mar 2019 21:15:13 +0800
Subject: [PATCH] refine with comments test=develop

---
 paddle/fluid/framework/operator.cc                       | 7 +------
 paddle/fluid/framework/operator.h                        | 9 +++++++++
 .../fluid/operators/fused/fused_embedding_seq_pool_op.cc | 8 +-------
 paddle/fluid/operators/hash_op.cc                        | 8 +-------
 .../operators/sequence_ops/sequence_enumerate_op.cc      | 8 +-------
 5 files changed, 13 insertions(+), 27 deletions(-)

diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 9f48b8cb9e7..e4bbcabea71 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -926,12 +926,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
     dev_ctx = pool.Get(expected_kernel_key.place_);
   }
 
-  // If Op has attribute all_kernels_must_compute_runtime_shape,
-  // all the kernels of this Op would compute runtime shape,
-  // and skip infershape in runtime for speedup.
-  // TODO(luotao): Note that it is a temporal attribute, after all ops
-  // implement computing runtime shape, this attribute would be deleted.
-  if (!HasAttr("all_kernels_must_compute_runtime_shape")) {
+  if (!HasAttr(kAllKernelsMustComputeRuntimeShape)) {
     RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
     this->InferShape(&infer_shape_ctx);
   }
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 55629636a81..822bf5c9cea 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -62,6 +62,15 @@ constexpr char kZeroVarSuffix[] = "@ZERO";
 /// Variables with this suffix are the new Gradient.
 constexpr char kNewGradSuffix[] = "@NEWGRAD@";
 
+/// If an Op has this attribute, all its kernels should calculate output
+/// variable's shape in the corresponding Compute() function. And
+/// OperatorWithKernel::RunImpl() would skip calling this Op's InferShape()
+/// function at runtime for speedup.
+/// TODO(luotao): Note that this temporary attribute would be deleted after
+/// all ops implement computing runtime shape in their kernels.
+constexpr char kAllKernelsMustComputeRuntimeShape[] =
+    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";
+
 // define some kernel priority
 /* Define multiple kernel type fallback order*/
 extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;
diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc
index 17a81d3e880..a0026427e25 100644
--- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc
+++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc
@@ -88,13 +88,7 @@ class FusedEmbeddingSeqPoolOpMaker : public framework::OpProtoAndCheckerMaker {
               "(boolean, default false) "
               "Sparse update.")
         .SetDefault(false);
-    AddAttr<bool>(
-        "all_kernels_must_compute_runtime_shape",
-        "(boolean, default true) "
-        "An attribute to speed up OperatorWithKernel::RunImpl."
-        "If true, all the kernels of this Op would compute runtime "
-        "shape, but skip infershape in runtime. Note that it is a temporal "
-        "attribute, please do DOT set it in python layer.")
+    AddAttr<bool>(framework::kAllKernelsMustComputeRuntimeShape, "")
         .SetDefault(true);
     AddComment(R"DOC(
 FusedEmbeddingSeqPool Operator.
diff --git a/paddle/fluid/operators/hash_op.cc b/paddle/fluid/operators/hash_op.cc
index b39eba081ec..f6395fb32fe 100644
--- a/paddle/fluid/operators/hash_op.cc
+++ b/paddle/fluid/operators/hash_op.cc
@@ -54,13 +54,7 @@ $$Out = scale * X$$
 )DOC");
     AddAttr<int>("num_hash", "").SetDefault(1);
     AddAttr<int>("mod_by", "").SetDefault(100000);
-    AddAttr<bool>(
-        "all_kernels_must_compute_runtime_shape",
-        "(boolean, default true) "
-        "An attribute to speed up OperatorWithKernel::RunImpl."
-        "If true, all the kernels of this Op would compute runtime "
-        "shape, but skip infershape in runtime. Note that it is a temporal "
-        "attribute, please do DOT set it in python layer.")
+    AddAttr<bool>(framework::kAllKernelsMustComputeRuntimeShape, "")
         .SetDefault(true);
   }
 };
diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
index 63e95e86544..f357c9c08d0 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
@@ -59,13 +59,7 @@ class SequenceEnumerateOpMaker : public framework::OpProtoAndCheckerMaker {
         });
     AddAttr<int>("pad_value", "(int) The enumerate sequence padding value.")
         .SetDefault(0);
-    AddAttr<bool>(
-        "all_kernels_must_compute_runtime_shape",
-        "(boolean, default true) "
-        "An attribute to speed up OperatorWithKernel::RunImpl."
-        "If true, all the kernels of this Op would compute runtime "
-        "shape, but skip infershape in runtime. Note that it is a temporal "
-        "attribute, please do DOT set it in python layer.")
+    AddAttr<bool>(framework::kAllKernelsMustComputeRuntimeShape, "")
         .SetDefault(true);
     AddComment(R"DOC(
 Sequence Enumerate Operator.
-- 
GitLab