From 6d78524c27732fdc4f3505815d392d8f24b2dca8 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Fri, 27 May 2022 20:47:18 +0800
Subject: [PATCH] [Phi] Change optional tensor from `optional<const Tensor&>`
 to `optional<Tensor>` (#42939)

* refactor the optional tensor

* remove optional in InferMeta

* fix bug

* fix optional>

* fix bug

* fix rmsprop

* fix amp of eager_gen

* polish code

* fix deleted code

* fix merge conflict

* polish code

* remove is_nullopt_

* fix merge conflict

* fix merge conflict

---
 .../final_state_generator/eager_gen.py | 21 +--
 paddle/fluid/eager/eager_amp_auto_cast.h | 14 +-
 paddle/fluid/eager/utils.cc | 2 +-
 paddle/fluid/eager/utils.h | 2 +-
 paddle/fluid/framework/infershape_utils.cc | 16 +-
 paddle/fluid/framework/infershape_utils.h | 12 +-
 paddle/fluid/framework/operator.cc | 11 +-
 paddle/fluid/imperative/prepared_operator.h | 8 +-
 .../operators/fused/fused_dropout_test.h | 4 +-
 paddle/fluid/operators/inplace_abn_op.cc | 6 +-
 paddle/fluid/operators/inplace_abn_op.cu | 6 +-
 .../operators/optimizers/dgc_momentum_op.h | 9 +-
 paddle/fluid/pybind/eager_utils.cc | 4 +-
 paddle/fluid/pybind/eager_utils.h | 2 +-
 paddle/phi/api/lib/api_custom_impl.cc | 165 ++++--------------
 paddle/phi/api/lib/api_custom_impl.h | 12 +-
 paddle/phi/api/lib/api_gen_utils.cc | 24 +--
 paddle/phi/api/lib/api_gen_utils.h | 14 +-
 paddle/phi/api/lib/data_transform.cc | 17 +-
 paddle/phi/api/lib/data_transform.h | 7 +-
 paddle/phi/api/lib/kernel_dispatch.h | 4 +-
 paddle/phi/core/infermeta_utils.cc | 14 +-
 paddle/phi/core/infermeta_utils.h | 25 +--
 paddle/phi/core/kernel_context.h | 16 +-
 paddle/phi/core/kernel_registry.h | 10 +-
 paddle/phi/core/kernel_utils.h | 6 +-
 paddle/phi/core/meta_tensor.h | 16 +-
 paddle/phi/infermeta/backward.cc | 35 ++--
 paddle/phi/infermeta/backward.h | 33 ++--
 paddle/phi/infermeta/binary.cc | 16 +-
 paddle/phi/infermeta/binary.h | 6 +-
 paddle/phi/infermeta/multiary.cc | 125 +++++++------
 paddle/phi/infermeta/multiary.h | 67 ++++---
 paddle/phi/infermeta/ternary.cc | 68 ++++----
 paddle/phi/infermeta/ternary.h | 18 +-
 paddle/phi/kernels/activation_grad_kernel.h | 2 +-
 paddle/phi/kernels/adam_kernel.h | 4 +-
 paddle/phi/kernels/adamw_kernel.h | 4 +-
 paddle/phi/kernels/assign_kernel.cc | 2 +-
 paddle/phi/kernels/assign_kernel.h | 2 +-
 paddle/phi/kernels/batch_norm_grad_kernel.h | 16 +-
 .../kernels/bilinear_tensor_product_kernel.h | 2 +-
 paddle/phi/kernels/bincount_kernel.h | 2 +-
 paddle/phi/kernels/conv_grad_grad_kernel.h | 8 +-
 paddle/phi/kernels/cpu/adam_kernel.cc | 4 +-
 paddle/phi/kernels/cpu/adamw_kernel.cc | 4 +-
 .../phi/kernels/cpu/batch_norm_grad_kernel.cc | 16 +-
 paddle/phi/kernels/cpu/bincount_kernel.cc | 4 +-
 .../phi/kernels/cpu/conv_grad_grad_kernel.cc | 4 +-
 paddle/phi/kernels/cpu/dropout_kernel.cc | 2 +-
 .../cpu/elementwise_add_grad_kernel.cc | 4 +-
 .../cpu/elementwise_subtract_grad_kernel.cc | 4 +-
 .../phi/kernels/cpu/graph_reindex_kernel.cc | 4 +-
 .../cpu/graph_sample_neighbors_kernel.cc | 4 +-
 .../cpu/graph_send_recv_grad_kernel.cc | 4 +-
 .../kernels/cpu/hierarchical_sigmoid_grad.h | 6 +-
 .../cpu/hierarchical_sigmoid_grad_kernel.cc | 6 +-
 .../cpu/hierarchical_sigmoid_kernel.cc | 6 +-
 .../kernels/cpu/instance_norm_grad_kernel.cc | 10 +-
 .../phi/kernels/cpu/instance_norm_kernel.cc | 4 +-
 .../kernels/cpu/interpolate_grad_kernel.cc | 54 +++---
 paddle/phi/kernels/cpu/interpolate_kernel.cc | 54 +++---
 paddle/phi/kernels/cpu/label_smooth_kernel.cc | 2 +-
 .../phi/kernels/cpu/layer_norm_grad_kernel.cc | 4 +-
 paddle/phi/kernels/cpu/layer_norm_kernel.cc | 4 +-
 .../phi/kernels/cpu/nll_loss_grad_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/nll_loss_kernel.cc | 2 +-
 .../phi/kernels/cpu/psroi_pool_grad_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/psroi_pool_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/rnn_grad_kernel.cc | 4 +-
 paddle/phi/kernels/cpu/rnn_kernel.cc | 2 +-
 .../phi/kernels/cpu/roi_align_grad_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/roi_align_kernel.cc | 2 +-
 .../phi/kernels/cpu/roi_pool_grad_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/roi_pool_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/sgd_kernel.cc | 6 +-
 .../kernels/cpu/yolov3_loss_grad_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/yolov3_loss_kernel.cc | 2 +-
 .../phi/kernels/deformable_conv_grad_kernel.h | 2 +-
 paddle/phi/kernels/deformable_conv_kernel.h | 2 +-
 paddle/phi/kernels/dropout_kernel.h | 2 +-
 .../phi/kernels/elementwise_add_grad_kernel.h | 4 +-
 .../kernels/elementwise_divide_grad_kernel.h | 4 +-
 .../elementwise_multiply_grad_kernel.h | 10 +-
 .../elementwise_subtract_grad_kernel.h | 4 +-
 paddle/phi/kernels/expand_as_kernel.h | 2 +-
 paddle/phi/kernels/funcs/pooling.cu | 2 +-
 paddle/phi/kernels/funcs/segment_pooling.cc | 2 +-
 paddle/phi/kernels/funcs/segment_pooling.cu | 2 +-
 paddle/phi/kernels/funcs/segment_pooling.h | 2 +-
 paddle/phi/kernels/gpu/adam_kernel.cu | 4 +-
 paddle/phi/kernels/gpu/adamw_kernel.cu | 4 +-
 .../phi/kernels/gpu/batch_norm_grad_kernel.cu | 16 +-
 paddle/phi/kernels/gpu/bincount_kernel.cu | 4 +-
 paddle/phi/kernels/gpu/dropout_kernel.cu | 2 +-
 .../gpu/elementwise_add_grad_kernel.cu | 4 +-
 .../gpu/elementwise_subtract_grad_kernel.cu | 4 +-
 .../phi/kernels/gpu/graph_reindex_kernel.cu | 4 +-
 .../gpu/graph_sample_neighbors_kernel.cu | 4 +-
 .../gpu/graph_send_recv_grad_kernel.cu | 4 +-
 .../kernels/gpu/instance_norm_grad_kernel.cu | 10 +-
 .../phi/kernels/gpu/instance_norm_kernel.cu | 4 +-
 .../kernels/gpu/interpolate_grad_kernel.cu | 54 +++---
 paddle/phi/kernels/gpu/interpolate_kernel.cu | 54 +++---
 paddle/phi/kernels/gpu/label_smooth_kernel.cu | 2 +-
 .../phi/kernels/gpu/layer_norm_grad_kernel.cu | 4 +-
 paddle/phi/kernels/gpu/layer_norm_kernel.cu | 4 +-
 .../phi/kernels/gpu/nll_loss_grad_kernel.cu | 2 +-
 paddle/phi/kernels/gpu/nll_loss_kernel.cu | 2 +-
 .../phi/kernels/gpu/psroi_pool_grad_kernel.cu | 2 +-
 paddle/phi/kernels/gpu/psroi_pool_kernel.cu | 2 +-
 paddle/phi/kernels/gpu/rnn_grad_kernel.cu.cc | 2 +-
 paddle/phi/kernels/gpu/rnn_kernel.cu.cc | 2 +-
 .../phi/kernels/gpu/roi_align_grad_kernel.cu | 2 +-
 paddle/phi/kernels/gpu/roi_align_kernel.cu | 2 +-
 .../phi/kernels/gpu/roi_pool_grad_kernel.cu | 2 +-
 paddle/phi/kernels/gpu/roi_pool_kernel.cu | 2 +-
 paddle/phi/kernels/gpu/sgd_kernel.cu | 6 +-
 .../kernels/gpudnn/conv_grad_grad_kernel.cu | 12 +-
 paddle/phi/kernels/graph_reindex_kernel.h | 4 +-
 .../kernels/graph_sample_neighbors_kernel.h | 4 +-
 .../phi/kernels/graph_send_recv_grad_kernel.h | 4 +-
 .../hierarchical_sigmoid_grad_kernel.h | 6 +-
 .../phi/kernels/hierarchical_sigmoid_kernel.h | 6 +-
 .../phi/kernels/impl/activation_grad_impl.h | 2 +-
 .../bilinear_tensor_product_kernel_impl.h | 2 +-
 .../kernels/impl/conv_grad_grad_kernel_impl.h | 4 +-
 .../impl/deformable_conv_grad_kernel_impl.h | 2 +-
 .../impl/deformable_conv_kernel_impl.h | 2 +-
 .../impl/elementwise_grad_kernel_impl.h | 22 +--
 .../phi/kernels/impl/expand_as_kernel_impl.h | 2 +-
 .../kernels/impl/matmul_grad_kernel_impl.h | 14 +-
 .../phi/kernels/impl/momentum_kernel_impl.h | 8 +-
 paddle/phi/kernels/impl/rmsprop_kernel_impl.h | 57 ++++--
 .../impl/segment_pool_grad_kernel_impl.h | 2 +-
 .../kernels/impl/warpctc_grad_kernel_impl.h | 2 +-
 paddle/phi/kernels/impl/warpctc_kernel_impl.h | 4 +-
 .../phi/kernels/instance_norm_grad_kernel.h | 10 +-
 paddle/phi/kernels/instance_norm_kernel.h | 4 +-
 paddle/phi/kernels/interpolate_grad_kernel.h | 6 +-
 paddle/phi/kernels/interpolate_kernel.h | 34 ++--
 paddle/phi/kernels/label_smooth_kernel.h | 2 +-
 paddle/phi/kernels/layer_norm_grad_kernel.h | 4 +-
 paddle/phi/kernels/layer_norm_kernel.h | 4 +-
 paddle/phi/kernels/matmul_grad_kernel.h | 14 +-
 paddle/phi/kernels/momentum_kernel.h | 4 +-
 paddle/phi/kernels/nll_loss_grad_kernel.h | 2 +-
 paddle/phi/kernels/nll_loss_kernel.cc | 2 +-
 paddle/phi/kernels/nll_loss_kernel.h | 2 +-
 paddle/phi/kernels/psroi_pool_grad_kernel.h | 2 +-
 paddle/phi/kernels/psroi_pool_kernel.h | 2 +-
 paddle/phi/kernels/rmsprop_kernel.h | 4 +-
 paddle/phi/kernels/rnn_grad_kernel.h | 2 +-
 paddle/phi/kernels/rnn_kernel.h | 2 +-
 paddle/phi/kernels/roi_align_grad_kernel.h | 2 +-
 paddle/phi/kernels/roi_align_kernel.h | 2 +-
 paddle/phi/kernels/roi_pool_grad_kernel.h | 2 +-
 paddle/phi/kernels/roi_pool_kernel.h | 2 +-
 paddle/phi/kernels/segment_pool_grad_kernel.h | 2 +-
 .../phi/kernels/selected_rows/adam_kernel.h | 4 +-
 .../phi/kernels/selected_rows/adamw_kernel.h | 4 +-
 .../kernels/selected_rows/assign_kernel.cc | 2 +-
 .../kernels/selected_rows/cpu/adam_kernel.cc | 4 +-
 .../kernels/selected_rows/cpu/adamw_kernel.cc | 4 +-
 .../kernels/selected_rows/gpu/adam_kernel.cu | 4 +-
 .../kernels/selected_rows/gpu/adamw_kernel.cu | 4 +-
 .../hierarchical_sigmoid_grad_kernel.cc | 6 +-
 .../hierarchical_sigmoid_grad_kernel.h | 6 +-
 paddle/phi/kernels/sgd_kernel.h | 6 +-
 paddle/phi/kernels/warpctc_grad_kernel.h | 2 +-
 paddle/phi/kernels/warpctc_kernel.h | 4 +-
 paddle/phi/kernels/yolov3_loss_grad_kernel.h | 2 +-
 paddle/phi/kernels/yolov3_loss_kernel.h | 2 +-
 python/paddle/utils/code_gen/api_base.py | 27 +--
 python/paddle/utils/code_gen/type_mapping.py | 2 +-
 .../utils/code_gen/wrapped_infermeta_gen.py | 5 +-
 176 files changed, 785 insertions(+), 911 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index 403216813dd..d8b909c3bac 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -345,14 +345,14 @@ AMP_LOGIC_TEMPLATE = \
 CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE = \
 """
-  paddle::optional<const paddle::experimental::Tensor&> {}_optional = paddle::none;
-  if({}.initialized()) {}_optional = paddle::make_optional<const paddle::experimental::Tensor&>({});
+  paddle::optional<paddle::experimental::Tensor> {}_optional;
+  if({}.initialized()) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
 """

 CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \
 """
-  paddle::optional<const paddle::experimental::Tensor&> {}_optional = paddle::none;
-  if( {}.impl() ) {}_optional = paddle::make_optional<const paddle::experimental::Tensor&>({});
+  paddle::optional<paddle::experimental::Tensor> {}_optional;
+  if( {}.impl() ) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
 """

 CHECK_BACKWARD_INPLACE_TEMPLATE = \
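The two generated-code templates above capture the core of the change: the emitted optionals are now value-holding `paddle::optional<Tensor>` objects whose default-constructed state already means "absent", instead of reference-holding `paddle::optional<const Tensor&>` objects that had to be explicitly initialized from `paddle::none`. A minimal sketch of the emitted shape, with `std::optional` standing in for `paddle::optional` and an illustrative `Tensor` stand-in:

#include <iostream>
#include <optional>

// Stand-in for paddle::experimental::Tensor (illustration only).
struct Tensor {
  bool initialized() const { return init; }
  bool init = false;
};

int main() {
  Tensor amp_output;
  amp_output.init = true;

  // Old emitted shape: a reference-holding optional, spelled out from
  // paddle::none:
  //   paddle::optional<const Tensor&> out_optional = paddle::none;
  // New emitted shape: a value-holding optional; default construction is
  // the empty state, so no explicit "none" initializer is needed.
  std::optional<Tensor> out_optional;
  if (amp_output.initialized()) {
    out_optional = std::make_optional<Tensor>(amp_output);
  }

  std::cout << out_optional.has_value() << "\n";  // prints 1
}
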
f"{indent}if({name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({name}.get_ptr()));" + set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});" else: set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});" set_output_tensor_wrappers_list.append(set_tensor_wrappers) @@ -888,15 +888,12 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): is_optional = (name in optional_inputs) if IsPlainTensorType(ttype): if is_optional: - arg_str = f"const paddle::optional {name}" + arg_str = f"const paddle::optional& {name}" amp_tensors_vector_optional_list.append( - f"if ({name}.get_ptr() != nullptr) amp_tensors_vector.push_back({{ *({name}.get_ptr()) }});\n" + f"if ({name}) amp_tensors_vector.push_back({{ *{name} }});\n" ) amp_autocast_optional_list.append( - f"auto NEW_{name}_temp_tensor = ({name}.get_ptr() != nullptr) ? egr::EagerAmpAutoCast(\"{name}\", *({name}.get_ptr()), amp_dst_dtype, op_name) : paddle::experimental::Tensor();\n" - ) - amp_autocast_optional_list.append( - f"auto NEW_{name} = ({name}.get_ptr() != nullptr) ? paddle::make_optional(NEW_{name}_temp_tensor) : {name};\n" + f"auto NEW_{name} = egr::EagerAmpAutoCast(\"{name}\", {name}, amp_dst_dtype, op_name);\n" ) else: if is_inplaced and forward_inplace_map and name in forward_inplace_map.keys( diff --git a/paddle/fluid/eager/eager_amp_auto_cast.h b/paddle/fluid/eager/eager_amp_auto_cast.h index ee9da41881b..c3fe3551ccb 100644 --- a/paddle/fluid/eager/eager_amp_auto_cast.h +++ b/paddle/fluid/eager/eager_amp_auto_cast.h @@ -60,7 +60,8 @@ inline std::vector EagerAmpAutoCasts( inline paddle::experimental::Tensor EagerAmpAutoCast( const std::string& input_name, const paddle::experimental::Tensor& input, - const paddle::experimental::DataType& dst_dtype, std::string op_name) { + const paddle::experimental::DataType& dst_dtype, + const std::string& op_name) { VLOG(6) << "AMP AmpAutoCasts:" << " input(" << input_name << ") dst_dtype(" << paddle::framework::DataType2String(dst_dtype) << ")."; @@ -87,4 +88,15 @@ inline paddle::experimental::Tensor EagerAmpAutoCast( return input; } +inline paddle::optional EagerAmpAutoCast( + const std::string& input_name, + const paddle::optional& input, + const paddle::experimental::DataType& dst_dtype, + const std::string& op_name) { + if (input) { + return EagerAmpAutoCast(input_name, *input, dst_dtype, op_name); + } + return paddle::none; +} + } // namespace egr diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc index d22f4316d56..9ccd91ca657 100644 --- a/paddle/fluid/eager/utils.cc +++ b/paddle/fluid/eager/utils.cc @@ -73,7 +73,7 @@ AutogradMeta* EagerUtils::nullable_autograd_meta( } AutogradMeta* EagerUtils::nullable_autograd_meta( - paddle::optional target) { + const paddle::optional& target) { if (target.get_ptr() != nullptr) { return EagerUtils::nullable_autograd_meta(*(target.get_ptr())); } diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h index 7f5864ec887..63baebca53c 100644 --- a/paddle/fluid/eager/utils.h +++ b/paddle/fluid/eager/utils.h @@ -125,7 +125,7 @@ class EagerUtils { static AutogradMeta* nullable_autograd_meta( const paddle::experimental::Tensor& target); static AutogradMeta* nullable_autograd_meta( - paddle::optional target); + const paddle::optional& target); static std::vector nullable_autograd_meta( const std::vector& targets); static std::vector nullable_autograd_meta( diff --git a/paddle/fluid/framework/infershape_utils.cc 
b/paddle/fluid/framework/infershape_utils.cc index 2a8ffbf431e..d7901a83b85 100644 --- a/paddle/fluid/framework/infershape_utils.cc +++ b/paddle/fluid/framework/infershape_utils.cc @@ -349,14 +349,6 @@ const phi::MetaTensor& CompatInferMetaContext::InputAt(size_t idx) const { return compat_inputs_.at(idx); } -paddle::optional -CompatInferMetaContext::OptionalInputAt(size_t idx) const { - const auto& input = compat_inputs_.at(idx); - return input.initialized() - ? paddle::optional{input} - : paddle::optional{paddle::none}; -} - std::vector CompatInferMetaContext::InputsBetween( size_t start, size_t end) const { std::vector result; @@ -370,7 +362,7 @@ std::vector CompatInferMetaContext::InputsBetween( return result; } -paddle::optional> +paddle::optional> CompatInferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { const auto& first = compat_inputs_.at(start); @@ -383,10 +375,10 @@ CompatInferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { result.emplace_back(in.initialized() ? &in : nullptr); } - return paddle::optional>(result); + return paddle::optional>( + std::move(result)); } - return paddle::optional>( - paddle::none); + return paddle::none; } phi::MetaTensor* CompatInferMetaContext::MutableOutputAt(size_t idx) { diff --git a/paddle/fluid/framework/infershape_utils.h b/paddle/fluid/framework/infershape_utils.h index 855e873b309..04ac1ff59f7 100644 --- a/paddle/fluid/framework/infershape_utils.h +++ b/paddle/fluid/framework/infershape_utils.h @@ -59,6 +59,12 @@ class CompatMetaTensor : public phi::MetaTensor { bool initialized() const override { return initialized_; }; + operator unspecified_bool_type() const override { + return initialized_ ? unspecified_bool_true : 0; + } + + bool operator!() const override { return !initialized_; } + private: const LoD& GetRuntimeLoD() const { auto* var = BOOST_GET_CONST(Variable*, var_); @@ -107,13 +113,11 @@ class CompatInferMetaContext : public phi::InferMetaContext { outputs); const phi::MetaTensor& InputAt(size_t idx) const override; - paddle::optional OptionalInputAt( - size_t idx) const override; std::vector InputsBetween(size_t start, size_t end) const override; - paddle::optional> - OptionalInputsBetween(size_t start, size_t end) const override; + paddle::optional> OptionalInputsBetween( + size_t start, size_t end) const override; phi::MetaTensor* MutableOutputAt(size_t idx) override; std::vector MutableOutputBetween(size_t start, diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index d8eab0e9a72..afd1bf338c4 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -2370,15 +2370,12 @@ void OperatorWithKernel::BuildPhiKernelContext( // deal with optional here if ((it == ctx.inputs.end() || it->second.size() == 0) && (input_defs[i].type_index == - std::type_index( - typeid(paddle::optional)) || + std::type_index(typeid(paddle::optional)) || input_defs[i].type_index == - std::type_index( - typeid(paddle::optional)) || + std::type_index(typeid(paddle::optional)) || input_defs[i].type_index == - std::type_index( - typeid(paddle::optional< - const std::vector>)))) { + std::type_index(typeid( + paddle::optional>)))) { pt_kernel_context->EmplaceBackInputWithoutSetRange(nullptr); auto end_idx = start_idx + 1; pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx), diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 129f75e75de..ccc8d64517f 100644 --- 
a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -279,16 +279,14 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, if (it == ins.end()) { if (LIKELY(input_defs[i].type_index == - std::type_index( - typeid(paddle::optional)))) { + std::type_index(typeid(paddle::optional)))) { kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr); auto end_idx = start_idx + 1; kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i); continue; } else if (input_defs[i].type_index == - std::type_index( - typeid(paddle::optional< - const std::vector>))) { + std::type_index(typeid( + paddle::optional>))) { kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr); auto end_idx = start_idx + 1; kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i); diff --git a/paddle/fluid/operators/fused/fused_dropout_test.h b/paddle/fluid/operators/fused/fused_dropout_test.h index a9b72a9cdf3..8527610247b 100644 --- a/paddle/fluid/operators/fused/fused_dropout_test.h +++ b/paddle/fluid/operators/fused/fused_dropout_test.h @@ -138,7 +138,7 @@ void LayerNorm(const std::vector> &scale, const platform::CUDADeviceContext &ctx) { framework::Scope scope; auto place = ctx.GetPlace(); - paddle::optional scale_opt = paddle::none; + paddle::optional scale_opt; if (scale.size() > 0) { auto var_scale = scope.Var("Scale"); auto tensor_scale = var_scale->GetMutable(); @@ -147,7 +147,7 @@ void LayerNorm(const std::vector> &scale, scale_opt = *tensor_scale; } - paddle::optional bias_opt = paddle::none; + paddle::optional bias_opt; if (bias.size() > 0) { auto var_bias = scope.Var("Bias"); auto tensor_bias = var_bias->GetMutable(); diff --git a/paddle/fluid/operators/inplace_abn_op.cc b/paddle/fluid/operators/inplace_abn_op.cc index 344b104b594..d420d0319bf 100644 --- a/paddle/fluid/operators/inplace_abn_op.cc +++ b/paddle/fluid/operators/inplace_abn_op.cc @@ -292,9 +292,9 @@ class InplaceABNGradKernel : public framework::OpKernel { auto* mean = ctx.Input("ReserveSpace"); auto* variance = ctx.Input("ReserveSpace"); - paddle::optional space_opt = paddle::none; - paddle::optional mean_opt = paddle::none; - paddle::optional variance_opt = paddle::none; + paddle::optional space_opt; + paddle::optional mean_opt; + paddle::optional variance_opt; if (reserve_space != nullptr) { space_opt = *reserve_space; diff --git a/paddle/fluid/operators/inplace_abn_op.cu b/paddle/fluid/operators/inplace_abn_op.cu index 6c16210ced0..6476023fcd2 100644 --- a/paddle/fluid/operators/inplace_abn_op.cu +++ b/paddle/fluid/operators/inplace_abn_op.cu @@ -120,9 +120,9 @@ class InplaceABNGradKernel auto* mean = ctx.Input("ReserveSpace"); auto* variance = ctx.Input("ReserveSpace"); - paddle::optional space_opt = paddle::none; - paddle::optional mean_opt = paddle::none; - paddle::optional variance_opt = paddle::none; + paddle::optional space_opt; + paddle::optional mean_opt; + paddle::optional variance_opt; if (reserve_space != nullptr) { space_opt = *reserve_space; diff --git a/paddle/fluid/operators/optimizers/dgc_momentum_op.h b/paddle/fluid/operators/optimizers/dgc_momentum_op.h index fc954e60a8c..9d6ecf414e6 100644 --- a/paddle/fluid/operators/optimizers/dgc_momentum_op.h +++ b/paddle/fluid/operators/optimizers/dgc_momentum_op.h @@ -72,8 +72,7 @@ class DGCMomentumKernel : public framework::OpKernel { auto* velocity_out = context.Output("VelocityOut"); auto* master_param_out = context.Output("MasterParamOut"); - paddle::optional master_param_opt = - paddle::none; + 
paddle::optional master_param_opt(paddle::none); float mu = context.Attr("mu"); bool use_nesterov = context.Attr("use_nesterov"); std::string regularization_method = @@ -117,8 +116,7 @@ class DGCMomentumKernel : public framework::OpKernel { auto* param_out = context.Output("ParamOut"); auto* master_param_out = context.Output("MasterParamOut"); - paddle::optional master_param_opt = - paddle::none; + paddle::optional master_param_opt(paddle::none); if (multi_precision) { auto* master_param = context.Input("MasterParam"); master_param_opt = *master_param; @@ -149,8 +147,7 @@ class DGCMomentumKernel : public framework::OpKernel { auto* param_out = context.Output("ParamOut"); auto* master_param_out = context.Output("MasterParamOut"); - paddle::optional master_param_opt = - paddle::none; + paddle::optional master_param_opt(paddle::none); if (multi_precision) { auto* master_param = context.Input("MasterParam"); master_param_opt = *master_param; diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index 4707f757d8b..efa0fe2cb58 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -765,7 +765,7 @@ PyObject* ToPyObject(const std::unordered_map& value) { // For Final State Dygraph, // We directly use paddle::optional(Tensor) as dispensable Tensor -paddle::optional GetOptionalTensorFromArgs( +paddle::optional GetOptionalTensorFromArgs( const std::string& op_type, const std::string& arg_name, PyObject* args, ssize_t arg_idx, bool dispensable) { PyObject* obj = PyTuple_GET_ITEM(args, arg_idx); @@ -784,7 +784,7 @@ paddle::optional GetOptionalTensorFromArgs( } if (PyObject_IsInstance(obj, reinterpret_cast(p_tensor_type))) { - return paddle::make_optional( + return paddle::make_optional( reinterpret_cast(obj)->tensor); } else { PADDLE_THROW(platform::errors::InvalidArgument( diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h index c8e1cd4ad0b..7f94f6c90e5 100644 --- a/paddle/fluid/pybind/eager_utils.h +++ b/paddle/fluid/pybind/eager_utils.h @@ -185,7 +185,7 @@ paddle::Place CastPyArg2Place(PyObject* obj, const std::string& op_type, paddle::DataType CastPyArg2DataType(PyObject* obj, const std::string& op_type, ssize_t arg_pos); -paddle::optional GetOptionalTensorFromArgs( +paddle::optional GetOptionalTensorFromArgs( const std::string& op_type, const std::string& arg_name, PyObject* args, ssize_t arg_idx, bool dispensable = false); diff --git a/paddle/phi/api/lib/api_custom_impl.cc b/paddle/phi/api/lib/api_custom_impl.cc index 8a845c331cc..b6431fcbe69 100644 --- a/paddle/phi/api/lib/api_custom_impl.cc +++ b/paddle/phi/api/lib/api_custom_impl.cc @@ -41,8 +41,8 @@ std::tuple adam_impl( const Tensor& moment2, const Tensor& beta1_pow, const Tensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -87,11 +87,8 @@ std::tuple adam_impl( auto input_moment2 = PrepareData(moment2, kernel.InputAt(4), {}); auto input_beta1_pow = PrepareData(beta1_pow, kernel.InputAt(5), {}); auto input_beta2_pow = PrepareData(beta2_pow, kernel.InputAt(6), {}); - paddle::optional input_master_param(paddle::none); - auto input_master_param_ptr = - PrepareData(master_param, kernel.InputAt(7), {}); - paddle::optional input_skip_update(paddle::none); - auto input_skip_update_ptr = PrepareData(skip_update, kernel.InputAt(8), {}); + auto input_master_param = 
PrepareData(master_param, kernel.InputAt(7), {}); + auto input_skip_update = PrepareData(skip_update, kernel.InputAt(8), {}); std::tuple api_output; auto kernel_out_0 = input_param.get(); @@ -100,40 +97,13 @@ std::tuple adam_impl( auto kernel_out_3 = input_beta1_pow.get(); auto kernel_out_4 = input_beta2_pow.get(); phi::DenseTensor* kernel_out_5 = nullptr; - if (input_master_param_ptr) { - input_master_param = - paddle::make_optional(*input_master_param_ptr); - kernel_out_5 = - paddle::make_optional(*input_master_param_ptr) - .get_ptr(); + if (input_master_param) { + kernel_out_5 = input_master_param.get_ptr(); } - if (input_skip_update_ptr) { - input_skip_update = - paddle::make_optional(*input_skip_update_ptr); - } - - paddle::optional input_meta_ref_master_param( - paddle::none); - phi::DenseTensor dt; - phi::MetaTensor input_meta_tmp_master_param(dt); - if (input_master_param_ptr) { - input_meta_tmp_master_param.set_dtype(input_master_param_ptr->dtype()); - input_meta_tmp_master_param.set_dims(input_master_param_ptr->dims()); - input_meta_tmp_master_param.set_layout(input_master_param_ptr->layout()); - input_meta_ref_master_param = input_meta_tmp_master_param; - } + auto input_meta_ref_master_param = MakeMetaTensor(input_master_param); - paddle::optional input_meta_ref_skip_update( - paddle::none); - phi::DenseTensor dt1; - phi::MetaTensor input_meta_tmp_skip_update(dt1); - if (input_skip_update_ptr) { - input_meta_tmp_skip_update.set_dtype(input_skip_update_ptr->dtype()); - input_meta_tmp_skip_update.set_dims(input_skip_update_ptr->dims()); - input_meta_tmp_skip_update.set_layout(input_skip_update_ptr->layout()); - input_meta_ref_skip_update = input_meta_tmp_skip_update; - } + auto input_meta_ref_skip_update = MakeMetaTensor(input_skip_update); phi::MetaTensor meta_out_0(kernel_out_0); phi::MetaTensor meta_out_1(kernel_out_1); @@ -176,8 +146,8 @@ std::tuple adam_impl( const phi::DenseTensor&, const phi::DenseTensor&, const phi::DenseTensor&, - paddle::optional, - paddle::optional, + const paddle::optional&, + const paddle::optional&, const Scalar&, const Scalar&, const Scalar&, @@ -250,8 +220,8 @@ std::tuple adam_impl( const phi::DenseTensor&, const phi::DenseTensor&, const phi::DenseTensor&, - paddle::optional, - paddle::optional, + const paddle::optional&, + const paddle::optional&, const Scalar&, const Scalar&, const Scalar&, @@ -304,8 +274,8 @@ std::tuple adamw_impl( const Tensor& moment2, const Tensor& beta1_pow, const Tensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -350,11 +320,8 @@ std::tuple adamw_impl( auto input_moment2 = PrepareData(moment2, kernel.InputAt(4), {}); auto input_beta1_pow = PrepareData(beta1_pow, kernel.InputAt(5), {}); auto input_beta2_pow = PrepareData(beta2_pow, kernel.InputAt(6), {}); - paddle::optional input_master_param(paddle::none); - auto input_master_param_ptr = - PrepareData(master_param, kernel.InputAt(7), {}); - paddle::optional input_skip_update(paddle::none); - auto input_skip_update_ptr = PrepareData(skip_update, kernel.InputAt(8), {}); + auto input_master_param = PrepareData(master_param, kernel.InputAt(7), {}); + auto input_skip_update = PrepareData(skip_update, kernel.InputAt(8), {}); std::tuple api_output; auto kernel_out_0 = input_param.get(); @@ -363,40 +330,13 @@ std::tuple adamw_impl( auto kernel_out_3 = input_beta1_pow.get(); auto kernel_out_4 = 
input_beta2_pow.get(); phi::DenseTensor* kernel_out_5 = nullptr; - if (input_master_param_ptr) { - input_master_param = - paddle::make_optional(*input_master_param_ptr); - kernel_out_5 = - paddle::make_optional(*input_master_param_ptr) - .get_ptr(); + if (input_master_param) { + kernel_out_5 = input_master_param.get_ptr(); } - if (input_skip_update_ptr) { - input_skip_update = - paddle::make_optional(*input_skip_update_ptr); - } - - paddle::optional input_meta_ref_master_param( - paddle::none); - phi::DenseTensor dt; - phi::MetaTensor input_meta_tmp_master_param(dt); - if (input_master_param_ptr) { - input_meta_tmp_master_param.set_dtype(input_master_param_ptr->dtype()); - input_meta_tmp_master_param.set_dims(input_master_param_ptr->dims()); - input_meta_tmp_master_param.set_layout(input_master_param_ptr->layout()); - input_meta_ref_master_param = input_meta_tmp_master_param; - } + auto input_meta_ref_master_param = MakeMetaTensor(input_master_param); - paddle::optional input_meta_ref_skip_update( - paddle::none); - phi::DenseTensor dt1; - phi::MetaTensor input_meta_tmp_skip_update(dt1); - if (input_skip_update_ptr) { - input_meta_tmp_skip_update.set_dtype(input_skip_update_ptr->dtype()); - input_meta_tmp_skip_update.set_dims(input_skip_update_ptr->dims()); - input_meta_tmp_skip_update.set_layout(input_skip_update_ptr->layout()); - input_meta_ref_skip_update = input_meta_tmp_skip_update; - } + auto input_meta_ref_skip_update = MakeMetaTensor(input_skip_update); phi::MetaTensor meta_out_0(kernel_out_0); phi::MetaTensor meta_out_1(kernel_out_1); @@ -439,8 +379,8 @@ std::tuple adamw_impl( const phi::DenseTensor&, const phi::DenseTensor&, const phi::DenseTensor&, - paddle::optional, - paddle::optional, + const paddle::optional&, + const paddle::optional&, const Scalar&, const Scalar&, const Scalar&, @@ -760,7 +700,7 @@ std::tuple momentum_impl( const Tensor& grad, const Tensor& velocity, const Tensor& learning_rate, - paddle::optional master_param, + const paddle::optional& master_param, float mu, bool use_nesterov, const std::string& regularization_method, @@ -801,32 +741,18 @@ std::tuple momentum_impl( auto input_grad = PrepareData(grad, kernel.InputAt(1), {}); auto input_velocity = PrepareData(velocity, kernel.InputAt(2), {}); auto input_learning_rate = PrepareData(learning_rate, kernel.InputAt(3), {}); - paddle::optional input_master_param(paddle::none); - auto input_master_param_ptr = - PrepareData(master_param, kernel.InputAt(4), {}); + auto input_master_param = PrepareData(master_param, kernel.InputAt(4), {}); std::tuple api_output; auto kernel_out_0 = input_param.get(); auto kernel_out_1 = input_velocity.get(); phi::DenseTensor* kernel_out_2 = nullptr; - if (input_master_param_ptr) { - input_master_param = - paddle::make_optional(*input_master_param_ptr); - kernel_out_2 = - paddle::make_optional(*input_master_param_ptr) - .get_ptr(); + if (input_master_param) { + kernel_out_2 = input_master_param.get_ptr(); } - paddle::optional input_meta_ref_master_param( - paddle::none); - phi::DenseTensor dt; - phi::MetaTensor input_meta_tmp_master_param(dt); - if (input_master_param_ptr) { - input_meta_tmp_master_param.set_dtype(input_master_param_ptr->dtype()); - input_meta_tmp_master_param.set_dims(input_master_param_ptr->dims()); - input_meta_tmp_master_param.set_layout(input_master_param_ptr->layout()); - input_meta_ref_master_param = input_meta_tmp_master_param; - } + auto input_meta_ref_master_param = MakeMetaTensor(input_master_param); + phi::MetaTensor meta_out_0(kernel_out_0); 
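This is the recurring simplification throughout api_custom_impl.cc: where each optional input previously needed roughly ten lines of by-hand `phi::MetaTensor` assembly (a scratch DenseTensor, three setter calls, then wrapping in an optional), a single `MakeMetaTensor` call now absorbs the branch, because a default-constructed MetaTensor itself represents "absent". A sketch of that overload's contract, with stand-in types, `std::optional` in place of `paddle::optional`, and `explicit operator bool` in place of the safe-bool conversion the real MetaTensor gains later in this patch:

#include <iostream>
#include <optional>

// Stand-ins for phi::DenseTensor and phi::MetaTensor (illustration only).
struct DenseTensor {
  int dims = 0;
};

class MetaTensor {
 public:
  MetaTensor() : tensor_(nullptr) {}  // default state means "no tensor"
  explicit MetaTensor(const DenseTensor& t) : tensor_(&t) {}

  explicit operator bool() const { return tensor_ != nullptr; }
  int dims() const { return tensor_->dims; }

 private:
  const DenseTensor* tensor_;
};

// The shape of the overload this patch gives MakeMetaTensor: an empty
// optional maps to a default-constructed MetaTensor rather than to an
// optional<const MetaTensor&>.
MetaTensor MakeMetaTensor(const std::optional<DenseTensor>& t) {
  return t ? MetaTensor(*t) : MetaTensor();
}

int main() {
  std::optional<DenseTensor> master_param;  // input not provided
  MetaTensor meta = MakeMetaTensor(master_param);
  if (meta) {
    std::cout << "dims: " << meta.dims() << "\n";
  } else {
    std::cout << "master_param not set\n";  // this branch is taken
  }
}
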
phi::MetaTensor meta_out_1(kernel_out_1); if (kernel_out_2) { @@ -867,7 +793,7 @@ std::tuple momentum_impl( const phi::DenseTensor&, const phi::DenseTensor&, const phi::DenseTensor&, - paddle::optional, + const paddle::optional&, float, bool, const std::string&, @@ -902,7 +828,7 @@ std::tuple sgd_impl( const Tensor& param, const Tensor& learning_rate, const Tensor& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision) { DataType kernel_data_type = ParseDataType(param); auto kernel_key_set = ParseKernelKeyByInputArgs(param, learning_rate, grad); @@ -940,17 +866,8 @@ std::tuple sgd_impl( if (phi::DenseTensor::classof(param_tensor.get())) { auto in_param = PrepareData(param, kernel.InputAt(0), {}); - auto in_master_param = PrepareData(master_param, kernel.InputAt(3), {}); - - paddle::optional in_master_param_opt = - master_param - ? paddle::make_optional(*in_master_param) - : paddle::none; - auto master_param_meta = MakeMetaTensor(in_master_param_opt); - paddle::optional master_param_meta_opt = - master_param - ? paddle::make_optional(*master_param_meta) - : paddle::none; + auto in_master_param_opt = PrepareData(master_param, kernel.InputAt(3), {}); + auto master_param_meta_opt = MakeMetaTensor(in_master_param_opt); phi::DenseTensor* kernel_out_0 = SetKernelOutput(kernel_key.backend(), &std::get<0>(out)); @@ -974,7 +891,7 @@ std::tuple sgd_impl( const phi::DenseTensor&, const phi::DenseTensor&, const phi::DenseTensor&, - paddle::optional, + const paddle::optional&, bool, phi::DenseTensor*, phi::DenseTensor*); @@ -1003,7 +920,7 @@ std::tuple sgd_impl( const phi::DenseTensor&, const phi::DenseTensor&, const phi::SelectedRows&, - paddle::optional, + const paddle::optional&, bool, phi::DenseTensor*, phi::DenseTensor*); @@ -1020,16 +937,8 @@ std::tuple sgd_impl( } else { auto in_param = TensorToSelectedRows(param); auto in_grad = TensorToSelectedRows(grad); - auto in_master_param = TensorToSelectedRows(master_param); - auto in_master_param_opt = - master_param - ? paddle::make_optional(*in_master_param) - : paddle::none; + auto in_master_param_opt = TensorToSelectedRows(master_param); auto master_param_meta = MakeMetaTensor(in_master_param_opt); - paddle::optional master_param_meta_opt = - master_param - ? 
paddle::make_optional(*master_param_meta) - : paddle::none; phi::SelectedRows* kernel_out_0 = SetSelectedRowsKernelOutput(kernel_key.backend(), &std::get<0>(out)); @@ -1041,7 +950,7 @@ std::tuple sgd_impl( SgdInferMeta(MakeMetaTensor(*in_param), MakeMetaTensor(*in_learning_rate), MakeMetaTensor(*in_grad), - master_param_meta_opt, + master_param_meta, multi_precision, &meta_out_0, &meta_out_1); @@ -1051,7 +960,7 @@ std::tuple sgd_impl( const phi::SelectedRows&, const phi::DenseTensor&, const phi::SelectedRows&, - paddle::optional, + const paddle::optional&, bool, phi::SelectedRows*, phi::SelectedRows*); diff --git a/paddle/phi/api/lib/api_custom_impl.h b/paddle/phi/api/lib/api_custom_impl.h index d88a134654c..f8ccbb36c5c 100644 --- a/paddle/phi/api/lib/api_custom_impl.h +++ b/paddle/phi/api/lib/api_custom_impl.h @@ -39,8 +39,8 @@ std::tuple adam_impl( const Tensor& moment2, const Tensor& beta1_pow, const Tensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -57,8 +57,8 @@ std::tuple adamw_impl( const Tensor& moment2, const Tensor& beta1_pow, const Tensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -107,7 +107,7 @@ std::tuple momentum_impl( const Tensor& grad, const Tensor& velocity, const Tensor& learning_rate, - paddle::optional master_param, + const paddle::optional& master_param, float mu, bool use_nesterov, const std::string& regularization_method, @@ -119,7 +119,7 @@ std::tuple sgd_impl( const Tensor& param, const Tensor& learning_rate, const Tensor& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision); ////////////////// Backward(grad) api impls ////////////////////// diff --git a/paddle/phi/api/lib/api_gen_utils.cc b/paddle/phi/api/lib/api_gen_utils.cc index 2111829b8d6..633bb1a32a1 100644 --- a/paddle/phi/api/lib/api_gen_utils.cc +++ b/paddle/phi/api/lib/api_gen_utils.cc @@ -23,10 +23,10 @@ std::shared_ptr TensorToDenseTensor(const Tensor& tensor) { return std::static_pointer_cast(tensor.impl()); } -std::shared_ptr TensorToDenseTensor( - const paddle::optional& tensor) { +paddle::optional TensorToDenseTensor( + const paddle::optional& tensor) { if (tensor) { - return std::static_pointer_cast(tensor->impl()); + return {*std::static_pointer_cast(tensor->impl())}; } return nullptr; } @@ -48,10 +48,10 @@ std::shared_ptr TensorToSelectedRows(const Tensor& tensor) { return std::static_pointer_cast(tensor.impl()); } -std::shared_ptr TensorToSelectedRows( - const paddle::optional& tensor) { +paddle::optional TensorToSelectedRows( + const paddle::optional& tensor) { if (tensor) { - return std::static_pointer_cast(tensor->impl()); + return {*std::static_pointer_cast(tensor->impl())}; } return nullptr; } @@ -66,12 +66,12 @@ phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor) { return phi::MetaTensor(tensor); } -paddle::optional MakeMetaTensor( - const paddle::optional& tensor) { +phi::MetaTensor MakeMetaTensor( + const paddle::optional& tensor) { if (tensor) { return {phi::MetaTensor(*tensor)}; } - return {paddle::none}; + return phi::MetaTensor(); } std::vector MakeMetaTensor( @@ -98,12 +98,12 @@ phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor) { return 
phi::MetaTensor(tensor); } -paddle::optional MakeMetaTensor( - const paddle::optional& tensor) { +phi::MetaTensor MakeMetaTensor( + const paddle::optional& tensor) { if (tensor) { return {phi::MetaTensor(*tensor)}; } - return {paddle::none}; + return phi::MetaTensor(); } phi::MetaTensor MakeMetaTensor(const phi::StringTensor& tensor) { diff --git a/paddle/phi/api/lib/api_gen_utils.h b/paddle/phi/api/lib/api_gen_utils.h index 097178ae0d9..83656a7b528 100644 --- a/paddle/phi/api/lib/api_gen_utils.h +++ b/paddle/phi/api/lib/api_gen_utils.h @@ -32,7 +32,7 @@ enum class TensorType { DENSE_TENSOR, SPARSE_CSR, SPARSE_COO, STRING_TENSOR }; std::shared_ptr TensorToDenseTensor(const Tensor& tensor); -std::shared_ptr TensorToDenseTensor( +paddle::optional TensorToDenseTensor( const paddle::optional& tensor); std::unique_ptr> TensorToDenseTensor( @@ -40,8 +40,8 @@ std::unique_ptr> TensorToDenseTensor( std::shared_ptr TensorToSelectedRows(const Tensor& tensor); -std::shared_ptr TensorToSelectedRows( - const paddle::optional& tensor); +paddle::optional TensorToSelectedRows( + const paddle::optional& tensor); std::shared_ptr TensorToStringTensor(const Tensor& tensor); @@ -49,8 +49,8 @@ std::shared_ptr TensorToStringTensor(const Tensor& tensor); phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor); -paddle::optional MakeMetaTensor( - const paddle::optional& tensor); +phi::MetaTensor MakeMetaTensor( + const paddle::optional& tensor); std::vector MakeMetaTensor( const std::vector& tensors); @@ -60,8 +60,8 @@ std::vector MakeMetaTensor( phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor); -paddle::optional MakeMetaTensor( - const paddle::optional& tensor); +phi::MetaTensor MakeMetaTensor( + const paddle::optional& tensor); phi::MetaTensor MakeMetaTensor(const phi::StringTensor& tensor); diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc index b00311061c9..598559cc4df 100644 --- a/paddle/phi/api/lib/data_transform.cc +++ b/paddle/phi/api/lib/data_transform.cc @@ -249,25 +249,14 @@ std::shared_ptr PrepareData( return nullptr; } -std::shared_ptr PrepareData( +paddle::optional PrepareData( const paddle::optional& input, const phi::TensorArgDef& target_args_def, const TransformFlag& transform_flag) { if (input) { - return PrepareData(*input, target_args_def, transform_flag); - } - return {nullptr}; -} - -std::shared_ptr PrepareData( - const paddle::optional input, - const phi::TensorArgDef& target_args_def, - const TransformFlag& transform_flag) { - if (input.get_ptr() != nullptr) { - return PrepareData(*(input.get_ptr()), target_args_def, transform_flag); + return {*PrepareData(*input, target_args_def, transform_flag)}; } - - return {nullptr}; + return paddle::none; } std::unique_ptr> PrepareData( diff --git a/paddle/phi/api/lib/data_transform.h b/paddle/phi/api/lib/data_transform.h index f5537961d0a..4d70078ef34 100644 --- a/paddle/phi/api/lib/data_transform.h +++ b/paddle/phi/api/lib/data_transform.h @@ -66,7 +66,7 @@ std::shared_ptr PrepareData( const phi::TensorArgDef& target_args_def, const TransformFlag& transform_flag); -std::shared_ptr PrepareData( +paddle::optional PrepareData( const paddle::optional& input, const phi::TensorArgDef& target_args_def, const TransformFlag& transform_flag); @@ -76,10 +76,5 @@ std::unique_ptr> PrepareData( const phi::TensorArgDef& target_args_def, const TransformFlag& transform_flag); -std::shared_ptr PrepareData( - const paddle::optional input, - const phi::TensorArgDef& target_args_def, - const TransformFlag& 
transform_flag); - } // namespace experimental } // namespace paddle diff --git a/paddle/phi/api/lib/kernel_dispatch.h b/paddle/phi/api/lib/kernel_dispatch.h index 29254a0486d..1091e0556da 100644 --- a/paddle/phi/api/lib/kernel_dispatch.h +++ b/paddle/phi/api/lib/kernel_dispatch.h @@ -125,8 +125,8 @@ struct KernelKeyParser : ArgsIterator { key_set.dtype = tensor.dtype(); } - void operator()(const paddle::optional x) { - if (x.get_ptr() != nullptr) { + void operator()(const paddle::optional& x) { + if (x) { const phi::TensorBase& tensor = *(x.get_ptr()->impl()); AssignKernelKeySet(tensor); } diff --git a/paddle/phi/core/infermeta_utils.cc b/paddle/phi/core/infermeta_utils.cc index 1d61f55f9dc..9d2b85435c7 100644 --- a/paddle/phi/core/infermeta_utils.cc +++ b/paddle/phi/core/infermeta_utils.cc @@ -65,14 +65,6 @@ const MetaTensor& InferMetaContext::InputAt(size_t idx) const { return inputs_.at(idx); } -paddle::optional InferMetaContext::OptionalInputAt( - size_t idx) const { - const auto& input = inputs_.at(idx); - return input.initialized() - ? paddle::optional{input} - : paddle::optional{paddle::none}; -} - std::vector InferMetaContext::InputsBetween( size_t start, size_t end) const { std::vector result; @@ -86,7 +78,7 @@ std::vector InferMetaContext::InputsBetween( return result; } -paddle::optional> +paddle::optional> InferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { const auto& first = inputs_.at(start); @@ -99,9 +91,9 @@ InferMetaContext::OptionalInputsBetween(size_t start, size_t end) const { result.emplace_back(in.initialized() ? &in : nullptr); } - return paddle::optional>(result); + return paddle::optional>(std::move(result)); } - return paddle::optional>(paddle::none); + return paddle::none; } MetaTensor* InferMetaContext::MutableOutputAt(size_t idx) { diff --git a/paddle/phi/core/infermeta_utils.h b/paddle/phi/core/infermeta_utils.h index b974f2c868a..d27d8bc7624 100644 --- a/paddle/phi/core/infermeta_utils.h +++ b/paddle/phi/core/infermeta_utils.h @@ -50,11 +50,10 @@ class InferMetaContext { paddle::small_vector outputs); virtual const MetaTensor& InputAt(size_t idx) const; - virtual paddle::optional OptionalInputAt(size_t idx) const; virtual std::vector InputsBetween(size_t start, size_t end) const; - virtual paddle::optional> + virtual paddle::optional> OptionalInputsBetween(size_t start, size_t end) const; virtual MetaTensor* MutableOutputAt(size_t idx); @@ -151,24 +150,6 @@ struct InferMetaFnImpl { } }; - template - struct InferMetaFnCallHelper, Tail...> { - template - static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) { - static_assert(attr_idx == 0, - "InferMeta's Input should appear before Attributes."); - static_assert(out_idx == 0, - "InferMeta's Input should appear before Outputs."); - const std::pair range = ctx->InputRangeAt(in_idx); - auto arg = ctx->OptionalInputAt(range.first); - - InferMetaFnCallHelper< - Tail...>::template Call(ctx, - pargs..., - arg); - } - }; - template struct InferMetaFnCallHelper&, Tail...> { template @@ -189,7 +170,7 @@ struct InferMetaFnImpl { template struct InferMetaFnCallHelper< - paddle::optional>, + const paddle::optional>&, Tail...> { template static void Call(InferMetaContext* ctx, PreviousArgs&... 
pargs) { @@ -198,7 +179,7 @@ struct InferMetaFnImpl { static_assert(out_idx == 0, "InferMeta's Input should appear before Outputs."); const std::pair range = ctx->InputRangeAt(in_idx); - paddle::optional> arg = + paddle::optional> arg = ctx->OptionalInputsBetween(range.first, range.second); InferMetaFnCallHelper< Tail...>::template Call(ctx, diff --git a/paddle/phi/core/kernel_context.h b/paddle/phi/core/kernel_context.h index 8b43239d352..0f155f445ec 100644 --- a/paddle/phi/core/kernel_context.h +++ b/paddle/phi/core/kernel_context.h @@ -81,11 +81,11 @@ class KernelContext { } template - paddle::optional OptionalInputAt(size_t idx) const { - const auto& input = inputs_.at(idx); - return input ? paddle::optional{static_cast< - const TensorType&>(*input)} - : paddle::optional{paddle::none}; + paddle::optional OptionalInputAt(size_t idx) const { + const auto* input = inputs_.at(idx); + return input ? paddle::make_optional( + *(static_cast(input))) + : paddle::none; } template @@ -99,7 +99,7 @@ class KernelContext { } template - paddle::optional> OptionalInputsBetween( + paddle::optional> OptionalInputsBetween( size_t start, size_t end) { const auto& first = inputs_.at(start); @@ -109,9 +109,9 @@ class KernelContext { auto* t = static_cast(inputs_.at(i)); v.emplace_back(t); } - return paddle::optional>(v); + return paddle::optional>(std::move(v)); } - return paddle::optional>(paddle::none); + return paddle::none; } template diff --git a/paddle/phi/core/kernel_registry.h b/paddle/phi/core/kernel_registry.h index 36ab9c081cc..41e1e2b53a9 100644 --- a/paddle/phi/core/kernel_registry.h +++ b/paddle/phi/core/kernel_registry.h @@ -76,20 +76,20 @@ struct KernelArgsParseFunctor { default_key.dtype(), arg_type); } else if (arg_type == std::type_index(typeid( - paddle::optional))) { + const paddle::optional&))) { args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); - } else if (arg_type == std::type_index(typeid( - paddle::optional< - const std::vector>))) { + } else if (arg_type == + std::type_index(typeid(const paddle::optional< + std::vector>&))) { args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), arg_type); } else if (arg_type == std::type_index(typeid( - paddle::optional))) { + const paddle::optional&))) { args_def->AppendInput(default_key.backend(), default_tensor_layout, default_key.dtype(), diff --git a/paddle/phi/core/kernel_utils.h b/paddle/phi/core/kernel_utils.h index f548d1da2d4..d4765d1c4c3 100644 --- a/paddle/phi/core/kernel_utils.h +++ b/paddle/phi/core/kernel_utils.h @@ -85,7 +85,7 @@ namespace phi { #define PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(tensor_type) \ template \ - struct KernelCallHelper, Tail...> { \ + struct KernelCallHelper&, Tail...> { \ template \ struct KernelCallHelper< \ - paddle::optional>, \ + const paddle::optional>&, \ Tail...> { \ template & range = ctx->InputRangeAt(in_idx); \ - paddle::optional> arg = \ + paddle::optional> arg = \ ctx->OptionalInputsBetween(range.first, range.second); \ KernelCallHelper:: \ template Compute( \ diff --git a/paddle/phi/core/meta_tensor.h b/paddle/phi/core/meta_tensor.h index 29afe0d0292..d277f32d8ea 100644 --- a/paddle/phi/core/meta_tensor.h +++ b/paddle/phi/core/meta_tensor.h @@ -39,7 +39,9 @@ struct MetaConfig { class MetaTensor { public: - MetaTensor() = default; + typedef void (*unspecified_bool_type)(); + + MetaTensor() : tensor_(nullptr) {} // supporting implicit construction is easier to use MetaTensor(TensorBase* tensor) : 
tensor_(tensor) {} // NOLINT @@ -68,12 +70,22 @@ class MetaTensor { virtual bool initialized() const; + virtual operator unspecified_bool_type() const { + return tensor_ == nullptr ? 0 : unspecified_bool_true; + } + + virtual bool operator!() const { return tensor_ == nullptr; } + + protected: + static void unspecified_bool_true() {} + private: // Because the lod in compiletime and runtime is different, // so `LoD` cannot in public methods const LoD& lod() const; TensorBase* tensor() const; - TensorBase* tensor_; + + TensorBase* tensor_ = nullptr; }; } // namespace phi diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 6b13a28c708..78f8ff9e00c 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -188,7 +188,7 @@ void CrossEntropyWithSoftmaxGradInferMeta(const MetaTensor& label, void DeformableConvGradInferMeta(const MetaTensor& x, const MetaTensor& offset, const MetaTensor& filter, - paddle::optional mask, + const MetaTensor& mask, const MetaTensor& out_grad, const std::vector& strides, const std::vector& paddings, @@ -202,7 +202,7 @@ void DeformableConvGradInferMeta(const MetaTensor& x, MetaTensor* mask_grad) { GeneralTernaryGradInferMeta(x, offset, filter, dx, offset_grad, filter_grad); if (mask) { - UnchangedInferMeta(mask.get(), mask_grad); + UnchangedInferMeta(mask, mask_grad); } } @@ -314,7 +314,7 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out, void InstanceNormGradInferMeta(const MetaTensor& x, const MetaTensor& y_grad, - paddle::optional scale, + const MetaTensor& scale, const MetaTensor& saved_mean, const MetaTensor& saved_variance, float epsilon, @@ -338,19 +338,18 @@ void InstanceNormGradInferMeta(const MetaTensor& x, bias_grad->set_dims({C}); } } -void InstanceNormDoubleGradInferMeta( - const MetaTensor& x, - paddle::optional scale, - const MetaTensor& saved_mean, - const MetaTensor& saved_variance, - const MetaTensor& dy, - paddle::optional ddx, - paddle::optional ddscale, - paddle::optional ddbias, - float epsilon, - MetaTensor* dx, - MetaTensor* dscale, - MetaTensor* ddy) { +void InstanceNormDoubleGradInferMeta(const MetaTensor& x, + const MetaTensor& scale, + const MetaTensor& saved_mean, + const MetaTensor& saved_variance, + const MetaTensor& dy, + const MetaTensor& ddx, + const MetaTensor& ddscale, + const MetaTensor& ddbias, + float epsilon, + MetaTensor* dx, + MetaTensor* dscale, + MetaTensor* ddy) { PADDLE_ENFORCE_NE( dx, nullptr, @@ -436,7 +435,7 @@ void MultiplexGradInferMeta(const MetaTensor& ids, void NllLossGradInferMeta(const MetaTensor& x, const MetaTensor& label, - paddle::optional weight, + const MetaTensor& weight, const MetaTensor& total_weight, const MetaTensor& out_grad, int64_t ignore_index, @@ -549,7 +548,7 @@ void PoolGradInferMeta(const MetaTensor& x, void PsroiPoolGradInferMeta(const MetaTensor& x, const MetaTensor& rois, - paddle::optional rois_num, + const MetaTensor& rois_num, const MetaTensor& dout, int pooled_height, int pooled_width, diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index 855b25d7ed4..b52734eb5b1 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -87,7 +87,7 @@ void CrossEntropyWithSoftmaxGradInferMeta(const MetaTensor& label, void DeformableConvGradInferMeta(const MetaTensor& x, const MetaTensor& offset, const MetaTensor& filter, - paddle::optional mask, + const MetaTensor& mask, const MetaTensor& out_grad, const std::vector& strides, const std::vector& paddings, @@ -146,7 +146,7 @@ 
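`MetaTensor`'s new conversion above is the classic pre-C++11 "safe bool" idiom: converting to a private function-pointer type makes `if (tensor)` and `if (!tensor)` work while rejecting the accidental arithmetic and cross-type comparisons that a plain `operator bool` would allow. A self-contained sketch of the idiom, using an illustrative `Handle` class rather than Paddle code:

#include <iostream>

class Handle {
 public:
  typedef void (*unspecified_bool_type)();

  Handle() : ptr_(nullptr) {}
  explicit Handle(int* p) : ptr_(p) {}

  // Convert to a function-pointer type instead of bool, so the object can
  // be tested in a condition without becoming usable as an integer.
  operator unspecified_bool_type() const {
    return ptr_ == nullptr ? 0 : unspecified_bool_true;
  }
  bool operator!() const { return ptr_ == nullptr; }

 private:
  static void unspecified_bool_true() {}
  int* ptr_;
};

int main() {
  int x = 42;
  Handle a;      // empty
  Handle b(&x);  // engaged
  if (!a) std::cout << "a is empty\n";
  if (b) std::cout << "b is set\n";
  // int n = a + 1;  // does not compile, which is the idiom's whole point
}
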
void GumbelSoftmaxGradInferMeta(const MetaTensor& out, void InstanceNormGradInferMeta(const MetaTensor& x, const MetaTensor& y_grad, - paddle::optional scale, + const MetaTensor& scale, const MetaTensor& saved_mean, const MetaTensor& saved_variance, float epsilon, @@ -154,19 +154,18 @@ void InstanceNormGradInferMeta(const MetaTensor& x, MetaTensor* scale_grad, MetaTensor* bias_grad); -void InstanceNormDoubleGradInferMeta( - const MetaTensor& x, - paddle::optional scale, - const MetaTensor& saved_mean, - const MetaTensor& saved_variance, - const MetaTensor& dy, - paddle::optional ddx, - paddle::optional ddscale, - paddle::optional ddbias, - float epsilon, - MetaTensor* dx, - MetaTensor* dscale, - MetaTensor* ddy); +void InstanceNormDoubleGradInferMeta(const MetaTensor& x, + const MetaTensor& scale, + const MetaTensor& saved_mean, + const MetaTensor& saved_variance, + const MetaTensor& dy, + const MetaTensor& ddx, + const MetaTensor& ddscale, + const MetaTensor& ddbias, + float epsilon, + MetaTensor* dx, + MetaTensor* dscale, + MetaTensor* ddy); void KernelWithXShapeInferMeta(const MetaTensor& xshape, MetaTensor* dx); @@ -194,7 +193,7 @@ void MultiplexGradInferMeta(const MetaTensor& ids, void NllLossGradInferMeta(const MetaTensor& input, const MetaTensor& label, - paddle::optional weight, + const MetaTensor& weight, const MetaTensor& total_weight, const MetaTensor& out_grad, int64_t ignore_index, @@ -209,7 +208,7 @@ void PixelUnshuffleGradInferMeta(const MetaTensor& out_grad, void PsroiPoolGradInferMeta(const MetaTensor& x, const MetaTensor& rois, - paddle::optional rois_num, + const MetaTensor& rois_num, const MetaTensor& dout, int pooled_height, int pooled_width, diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index 837a43905e7..76b6fcdd52e 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -201,7 +201,7 @@ void BCELossInferMeta(const MetaTensor& input, } void BincountInferMeta(const MetaTensor& x, - const paddle::optional weights, + const MetaTensor& weights, int minlength, MetaTensor* out) { auto input_dim = x.dims(); @@ -220,8 +220,10 @@ void BincountInferMeta(const MetaTensor& x, "But the dimension of Input(X) is [%d]", input_dim.size())); - if (weights.is_initialized()) { - auto weights_dim = weights->dims(); + VLOG(1) << "####### CHECK weights"; + if (weights) { + auto weights_dim = weights.dims(); + VLOG(1) << "##### weights_dim " << weights_dim; PADDLE_ENFORCE_EQ(weights_dim.size(), 1, phi::errors::InvalidArgument( @@ -241,8 +243,8 @@ void BincountInferMeta(const MetaTensor& x, input_dim)); } out->set_dims(phi::make_ddim({-1})); - if (weights.is_initialized()) { - out->set_dtype(weights->dtype()); + if (weights) { + out->set_dtype(weights.dtype()); } else { out->set_dtype(x.dtype()); } @@ -864,7 +866,7 @@ void DistInferMeta(const MetaTensor& x, } void DropoutInferMeta(const MetaTensor& x, - paddle::optional seed_tensor, + const MetaTensor& seed_tensor, float p, bool is_test, const std::string& mode, @@ -982,7 +984,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x, } void ExpandAsInferMeta(const MetaTensor& x, - paddle::optional y, + const MetaTensor& y, const std::vector& target_shape, MetaTensor* out) { #define MAX_RANK_SUPPORTED 6 diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h index 192fa214c90..0c86e5389c4 100644 --- a/paddle/phi/infermeta/binary.h +++ b/paddle/phi/infermeta/binary.h @@ -56,7 +56,7 @@ void BCELossInferMeta(const MetaTensor& input, MetaConfig config = MetaConfig()); void 
BincountInferMeta(const MetaTensor& x, - const paddle::optional weights, + const MetaTensor& weights, int minlength, MetaTensor* out); @@ -136,7 +136,7 @@ void DistInferMeta(const MetaTensor& x, void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out); void DropoutInferMeta(const MetaTensor& x, - paddle::optional seed_tensor, + const MetaTensor& seed_tensor, float p, bool is_test, const std::string& mode, @@ -155,7 +155,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta, MetaTensor* out); void ExpandAsInferMeta(const MetaTensor& x, - paddle::optional y, + const MetaTensor& y, const std::vector& target_shape, MetaTensor* out); diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc index 48c40673ab8..63f0d0c1eeb 100644 --- a/paddle/phi/infermeta/multiary.cc +++ b/paddle/phi/infermeta/multiary.cc @@ -100,8 +100,8 @@ void AdamInferMeta(const MetaTensor& param, const MetaTensor& moment2, const MetaTensor& beta1_pow, const MetaTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const MetaTensor& master_param, + const MetaTensor& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -238,8 +238,8 @@ void AdamwInferMeta(const MetaTensor& param, const MetaTensor& moment2, const MetaTensor& beta1_pow, const MetaTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const MetaTensor& master_param, + const MetaTensor& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -580,7 +580,7 @@ void BatchNormInferInferMeta(const MetaTensor& x, void BilinearTensorProductInferMeta(const MetaTensor& x, const MetaTensor& y, const MetaTensor& weight, - paddle::optional bias, + const MetaTensor& bias, MetaTensor* out, MetaConfig config) { auto x_dims = x.dims(); @@ -619,8 +619,8 @@ void BilinearTensorProductInferMeta(const MetaTensor& x, "The second dimension of input(Y) must be equal to " "the third dimension of the input(Weight).")); - if (bias.get_ptr()) { - auto bias_dims = bias->dims(); + if (bias) { + auto bias_dims = bias.dims(); PADDLE_ENFORCE_EQ(bias_dims.size(), 2UL, errors::InvalidArgument( @@ -772,7 +772,7 @@ inline int ConvOutputSize( void DeformableConvInferMeta(const MetaTensor& x, const MetaTensor& offset, const MetaTensor& filter, - paddle::optional mask, + const MetaTensor& mask, const std::vector& strides, const std::vector& paddings, const std::vector& dilations, @@ -918,7 +918,7 @@ void DeformableConvInferMeta(const MetaTensor& x, deformable_groups)); if (mask) { - auto mask_dims = mask->dims(); + auto mask_dims = mask.dims(); PADDLE_ENFORCE_EQ(output_shape[2], mask_dims[2], phi::errors::InvalidArgument( @@ -958,9 +958,9 @@ void DeformableConvInferMeta(const MetaTensor& x, void HierarchicalSigmoidInferMeta(const MetaTensor& x, const MetaTensor& w, const MetaTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const MetaTensor& path, + const MetaTensor& code, + const MetaTensor& bias, int num_classes, bool remote_prefetch, int trainer_id, @@ -991,9 +991,9 @@ void HierarchicalSigmoidInferMeta(const MetaTensor& x, static void Interpolate1DInferShapeCheck( const MetaTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const MetaTensor& out_size, + const paddle::optional>& size_tensor, + const MetaTensor& scale_tensor, const std::string& data_layout_str, int out_d, int out_h, @@ -1048,7 +1048,7 @@ static void 
Interpolate1DInferShapeCheck( int out_w_tmp; if (scale_tensor) { - auto scale_tensor_dim = scale_tensor->dims(); + auto scale_tensor_dim = scale_tensor.dims(); PADDLE_ENFORCE_EQ( scale_tensor_dim.size(), 1, @@ -1086,7 +1086,7 @@ static void Interpolate1DInferShapeCheck( } if (out_size && config.is_runtime) { - auto out_size_dim = out_size->dims(); + auto out_size_dim = out_size.dims(); PADDLE_ENFORCE_EQ( out_size_dim.size(), 1, @@ -1118,9 +1118,9 @@ static void Interpolate1DInferShapeCheck( static void Interpolate2DInferShapeCheck( const MetaTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const MetaTensor& out_size, + const paddle::optional>& size_tensor, + const MetaTensor& scale_tensor, const std::string& data_layout_str, int out_d, int out_h, @@ -1178,7 +1178,7 @@ static void Interpolate2DInferShapeCheck( int out_h_tmp, out_w_tmp; if (scale_tensor) { - auto scale_tensor_dim = scale_tensor->dims(); + auto scale_tensor_dim = scale_tensor.dims(); PADDLE_ENFORCE_EQ( scale_tensor_dim.size(), 1, @@ -1231,7 +1231,7 @@ static void Interpolate2DInferShapeCheck( } if (out_size && config.is_runtime) { - auto out_size_dim = out_size->dims(); + auto out_size_dim = out_size.dims(); PADDLE_ENFORCE_EQ( out_size_dim.size(), 1, @@ -1263,9 +1263,9 @@ static void Interpolate2DInferShapeCheck( static void Interpolate3DInferShapeCheck( const MetaTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const MetaTensor& out_size, + const paddle::optional>& size_tensor, + const MetaTensor& scale_tensor, const std::string& data_layout_str, int out_d, int out_h, @@ -1321,7 +1321,7 @@ static void Interpolate3DInferShapeCheck( int out_d_tmp, out_h_tmp, out_w_tmp; if (scale_tensor) { - auto scale_tensor_dim = scale_tensor->dims(); + auto scale_tensor_dim = scale_tensor.dims(); PADDLE_ENFORCE_EQ( scale_tensor_dim.size(), 1, @@ -1389,7 +1389,7 @@ static void Interpolate3DInferShapeCheck( } if (out_size && config.is_runtime) { - auto out_size_dim = out_size->dims(); + auto out_size_dim = out_size.dims(); PADDLE_ENFORCE_EQ( out_size_dim.size(), 1, @@ -1419,9 +1419,9 @@ static void Interpolate3DInferShapeCheck( void InterpolateInferMeta( const MetaTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const MetaTensor& out_size, + const paddle::optional>& size_tensor, + const MetaTensor& scale_tensor, const std::string& data_layout_str, int out_d, int out_h, @@ -1546,7 +1546,7 @@ void MomentumInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& velocity, const MetaTensor& learning_rate, - paddle::optional master_param, + const MetaTensor& master_param, float mu, bool use_nesterov, const std::string& regularization_method, @@ -1709,7 +1709,7 @@ void MultiplexInferMeta(const std::vector& ins, void PsroiPoolInferMeta(const MetaTensor& x, const MetaTensor& rois, - paddle::optional rois_num, + const MetaTensor& rois_num, int pooled_height, int pooled_width, int output_channels, @@ -1732,8 +1732,8 @@ void PsroiPoolInferMeta(const MetaTensor& x, errors::InvalidArgument( "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " "given as [(x1, y1, x2, y2), ...]")); - if (rois_num.get_ptr()) { - auto rois_num_dims = rois_num->dims(); + if (rois_num) { + auto rois_num_dims = rois_num.dims(); PADDLE_ENFORCE_EQ( rois_num_dims.size(), 1, @@ -1787,7 +1787,7 @@ void RmspropInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& 
moment, const MetaTensor& learning_rate, - paddle::optional mean_grad, + const MetaTensor& mean_grad, float epsilon, float decay, float momentum, @@ -1837,14 +1837,14 @@ void RmspropInferMeta(const MetaTensor& param, mean_square_out->set_dtype(mean_square.dtype()); if (centered) { mean_grad_out->set_dims(param_dim); - mean_grad_out->set_dtype(mean_grad.get_ptr()->dtype()); + mean_grad_out->set_dtype(mean_grad.dtype()); } } void RnnInferMeta(const MetaTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const MetaTensor& sequence_length, float dropout_prob, bool is_bidirec, int input_size, @@ -1867,7 +1867,7 @@ void RnnInferMeta(const MetaTensor& x, in_dims.size())); if (sequence_length) { - auto seq_dims = sequence_length->dims(); + auto seq_dims = sequence_length.dims(); PADDLE_ENFORCE_EQ( in_dims[1], seq_dims[0], @@ -1929,7 +1929,7 @@ void RnnInferMeta(const MetaTensor& x, void SgdInferMeta(const MetaTensor& param, const MetaTensor& learning_rate, const MetaTensor& grad, - paddle::optional master_param, + const MetaTensor& master_param, bool multi_precision, MetaTensor* param_out, MetaTensor* master_param_out) { @@ -2006,8 +2006,8 @@ void UnchangedMultiInferMeta(const std::vector& x, void WarpctcInferMeta(const MetaTensor& logits, const MetaTensor& label, - const paddle::optional logits_length, - const paddle::optional labels_length, + const MetaTensor& logits_length, + const MetaTensor& labels_length, int blank, bool norm_by_times, MetaTensor* warpctc_grad, @@ -2015,7 +2015,7 @@ void WarpctcInferMeta(const MetaTensor& logits, auto logits_dims = logits.dims(); int sequence_width = 0; - if (logits_length.is_initialized()) { + if (logits_length) { sequence_width = logits_dims[2]; } else { sequence_width = @@ -2069,8 +2069,8 @@ void WhereInferMeta(const MetaTensor& condition, void GraphReindexInferMeta(const MetaTensor& x, const MetaTensor& neighbors, const MetaTensor& count, - paddle::optional hashtable_value, - paddle::optional hashtable_index, + const MetaTensor& hashtable_value, + const MetaTensor& hashtable_index, bool flag_buffer_hashtable, MetaTensor* reindex_src, MetaTensor* reindex_dst, @@ -2100,8 +2100,8 @@ void GraphReindexInferMeta(const MetaTensor& x, GraphReindexShapeCheck(neighbors.dims(), "Neighbors"); GraphReindexShapeCheck(count.dims(), "Count"); if (flag_buffer_hashtable) { - GraphReindexShapeCheck(hashtable_value->dims(), "HashTable_Value"); - GraphReindexShapeCheck(hashtable_index->dims(), "HashTable_Index"); + GraphReindexShapeCheck(hashtable_value.dims(), "HashTable_Value"); + GraphReindexShapeCheck(hashtable_index.dims(), "HashTable_Index"); } reindex_src->set_dims({-1}); @@ -2112,18 +2112,17 @@ void GraphReindexInferMeta(const MetaTensor& x, out_nodes->set_dtype(x.dtype()); } -void GraphSampleNeighborsInferMeta( - const MetaTensor& row, - const MetaTensor& col_ptr, - const MetaTensor& x, - paddle::optional eids, - paddle::optional perm_buffer, - int sample_size, - bool return_eids, - bool flag_perm_buffer, - MetaTensor* out, - MetaTensor* out_count, - MetaTensor* out_eids) { +void GraphSampleNeighborsInferMeta(const MetaTensor& row, + const MetaTensor& col_ptr, + const MetaTensor& x, + const MetaTensor& eids, + const MetaTensor& perm_buffer, + int sample_size, + bool return_eids, + bool flag_perm_buffer, + MetaTensor* out, + MetaTensor* out_count, + MetaTensor* out_eids) { // GSN: GraphSampleNeighbors auto GSNShapeCheck = [](const phi::DDim& dims, std::string tensor_name) { if (dims.size() == 2) { @@ -2149,12 
+2148,12 @@ void GraphSampleNeighborsInferMeta( GSNShapeCheck(col_ptr.dims(), "Col_Ptr"); GSNShapeCheck(x.dims(), "X"); if (return_eids) { - GSNShapeCheck(eids->dims(), "Eids"); + GSNShapeCheck(eids.dims(), "Eids"); out_eids->set_dims({-1}); out_eids->set_dtype(row.dtype()); } if (flag_perm_buffer) { - GSNShapeCheck(perm_buffer->dims(), "Perm_Buffer"); + GSNShapeCheck(perm_buffer.dims(), "Perm_Buffer"); } out->set_dims({-1}); @@ -2166,7 +2165,7 @@ void GraphSampleNeighborsInferMeta( void Yolov3LossInferMeta(const MetaTensor& x, const MetaTensor& gt_box, const MetaTensor& gt_label, - const paddle::optional gt_score, + const MetaTensor& gt_score, const std::vector& anchors, const std::vector& anchor_mask, int class_num, @@ -2271,8 +2270,8 @@ void Yolov3LossInferMeta(const MetaTensor& x, "But received class_num(%s) < 0", class_num)); - if (gt_score.get_ptr()) { - auto dim_gtscore = gt_score->dims(); + if (gt_score) { + auto dim_gtscore = gt_score.dims(); PADDLE_ENFORCE_EQ( dim_gtscore.size(), 2, diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index 65b5819b602..54c6fccceb9 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -76,8 +76,8 @@ void AdamInferMeta(const MetaTensor& param, const MetaTensor& moment2, const MetaTensor& beta1_pow, const MetaTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const MetaTensor& master_param, + const MetaTensor& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -99,8 +99,8 @@ void AdamwInferMeta(const MetaTensor& param, const MetaTensor& moment2, const MetaTensor& beta1_pow, const MetaTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const MetaTensor& master_param, + const MetaTensor& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, @@ -170,7 +170,7 @@ void BatchNormInferInferMeta(const MetaTensor& x, void BilinearTensorProductInferMeta(const MetaTensor& x, const MetaTensor& y, const MetaTensor& weight, - paddle::optional bias, + const MetaTensor& bias, MetaTensor* out, MetaConfig config = MetaConfig()); @@ -185,7 +185,7 @@ void ConcatInferMeta(const std::vector& x, void DeformableConvInferMeta(const MetaTensor& x, const MetaTensor& offset, const MetaTensor& filter, - paddle::optional mask, + const MetaTensor& mask, const std::vector& strides, const std::vector& paddings, const std::vector& dilations, @@ -198,9 +198,9 @@ void DeformableConvInferMeta(const MetaTensor& x, void HierarchicalSigmoidInferMeta(const MetaTensor& x, const MetaTensor& w, const MetaTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const MetaTensor& path, + const MetaTensor& code, + const MetaTensor& bias, int num_classes, bool remote_prefetch, int trainer_id, @@ -214,9 +214,9 @@ void HierarchicalSigmoidInferMeta(const MetaTensor& x, void InterpolateInferMeta( const MetaTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const MetaTensor& out_size, + const paddle::optional>& size_tensor, + const MetaTensor& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -241,7 +241,7 @@ void MomentumInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& velocity, const MetaTensor& learning_rate, - paddle::optional master_param, + const MetaTensor& master_param, float mu, bool use_nesterov, const std::string& regularization_method, @@ -261,7 +261,7 @@ void 
MultiplexInferMeta(const std::vector& ins, void PsroiPoolInferMeta(const MetaTensor& x, const MetaTensor& rois, - paddle::optional rois_num, + const MetaTensor& rois_num, int pooled_height, int pooled_width, int output_channels, @@ -273,7 +273,7 @@ void RmspropInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& moment, const MetaTensor& learning_rate, - paddle::optional mean_grad, + const MetaTensor& mean_grad, float epsilon, float decay, float momentum, @@ -286,7 +286,7 @@ void RmspropInferMeta(const MetaTensor& param, void RnnInferMeta(const MetaTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const MetaTensor& sequence_length, float dropout_prob, bool is_bidirec, int input_size, @@ -303,7 +303,7 @@ void RnnInferMeta(const MetaTensor& x, void SgdInferMeta(const MetaTensor& param, const MetaTensor& learning_rate, const MetaTensor& grad, - paddle::optional master_param, + const MetaTensor& master_param, bool multi_precision, MetaTensor* param_out, MetaTensor* master_param_out); @@ -317,8 +317,8 @@ void UnchangedMultiInferMeta(const std::vector& x, void WarpctcInferMeta(const MetaTensor& logits, const MetaTensor& label, - const paddle::optional logits_length, - const paddle::optional labels_length, + const MetaTensor& logits_length, + const MetaTensor& labels_length, int blank, bool norm_by_times, MetaTensor* warpctc_grad, @@ -332,30 +332,29 @@ void WhereInferMeta(const MetaTensor& condition, void GraphReindexInferMeta(const MetaTensor& x, const MetaTensor& neighbors, const MetaTensor& count, - paddle::optional hashtable_value, - paddle::optional hashtable_index, + const MetaTensor& hashtable_value, + const MetaTensor& hashtable_index, bool flag_buffer_hashtable, MetaTensor* reindex_src, MetaTensor* reindex_dst, MetaTensor* out_nodes); -void GraphSampleNeighborsInferMeta( - const MetaTensor& row, - const MetaTensor& col_ptr, - const MetaTensor& x, - paddle::optional eids, - paddle::optional perm_buffer, - int sample_size, - bool return_eids, - bool flag_perm_buffer, - MetaTensor* out, - MetaTensor* out_count, - MetaTensor* out_eids); +void GraphSampleNeighborsInferMeta(const MetaTensor& row, + const MetaTensor& col_ptr, + const MetaTensor& x, + const MetaTensor& eids, + const MetaTensor& perm_buffer, + int sample_size, + bool return_eids, + bool flag_perm_buffer, + MetaTensor* out, + MetaTensor* out_count, + MetaTensor* out_eids); void Yolov3LossInferMeta(const MetaTensor& x, const MetaTensor& gt_box, const MetaTensor& gt_label, - const paddle::optional gt_score, + const MetaTensor& gt_score, const std::vector& anchors, const std::vector& anchor_mask, int class_num, diff --git a/paddle/phi/infermeta/ternary.cc b/paddle/phi/infermeta/ternary.cc index e3f946b247f..58ae6b2058f 100644 --- a/paddle/phi/infermeta/ternary.cc +++ b/paddle/phi/infermeta/ternary.cc @@ -192,8 +192,8 @@ void ArangeInferMeta(const MetaTensor& start, } void InstanceNormInferMeta(const MetaTensor& x, - paddle::optional scale, - paddle::optional bias, + const MetaTensor& scale, + const MetaTensor& bias, float epsilon, MetaTensor* y, MetaTensor* saved_mean, @@ -242,9 +242,8 @@ void InstanceNormInferMeta(const MetaTensor& x, auto N = x_dims[0]; auto C = x_dims[1]; auto NxC = N * C; - const auto scale_ptr = scale.get_ptr(); - if (scale_ptr) { - auto scale_dim = scale_ptr->dims(); + if (scale) { + auto scale_dim = scale.dims(); PADDLE_ENFORCE_EQ( scale_dim.size(), 1UL, @@ -265,9 +264,8 @@ void InstanceNormInferMeta(const MetaTensor& x, 
                          scale_dim[0]));
     }
   }
-  const auto bias_ptr = bias.get_ptr();
-  if (bias_ptr) {
-    auto bias_dim = bias_ptr->dims();
+  if (bias) {
+    auto bias_dim = bias.dims();
     PADDLE_ENFORCE_EQ(
         bias_dim.size(),
         1UL,
@@ -365,8 +363,8 @@ void GraphSendRecvInferMeta(const MetaTensor& x,
 }
 
 void LayerNormInferMeta(const MetaTensor& x,
-                        paddle::optional<const MetaTensor&> scale,
-                        paddle::optional<const MetaTensor&> bias,
+                        const MetaTensor& scale,
+                        const MetaTensor& bias,
                         float epsilon,
                         int begin_norm_axis,
                         bool is_test,
@@ -388,19 +386,19 @@ void LayerNormInferMeta(const MetaTensor& x,
   auto matrix_dim = phi::flatten_to_2d(x_dim, begin_norm_axis);
   int left = static_cast<int>(matrix_dim[0]);
   int right = static_cast<int>(matrix_dim[1]);
-  if (scale.get_ptr() != nullptr) {
-    PADDLE_ENFORCE_EQ(scale->dims().size(),
+  if (scale) {
+    PADDLE_ENFORCE_EQ(scale.dims().size(),
                       1,
                       phi::errors::InvalidArgument(
                           "The dimensions of Input(Scale) must be 1, but "
                           "received dimensions of"
                           "Input(Scale) is [%d]",
-                          scale->dims().size()));
+                          scale.dims().size()));
   }
 
-  if (config.is_runtime && scale.get_ptr() != nullptr) {
+  if (config.is_runtime && scale) {
     PADDLE_ENFORCE_EQ(
-        scale->dims()[0],
+        scale.dims()[0],
         right,
         phi::errors::InvalidArgument(
             "The first dimension value of Input(Scale) must equal to be the"
@@ -408,21 +406,21 @@
             "But received the first dimension value of Input(Scale) is"
             "[%d], the second dimension value of the flattened 2D matrix of"
             " Input(Scale) is [%d].",
-            scale->dims()[0],
+            scale.dims()[0],
             right));
   }
-  if (bias.get_ptr() != nullptr) {
-    PADDLE_ENFORCE_EQ(bias->dims().size(),
+  if (bias) {
+    PADDLE_ENFORCE_EQ(bias.dims().size(),
                       1,
                       phi::errors::InvalidArgument(
                           "The dimensions of Input(Bias) must be 1, but "
                           "received dimensions of"
                           "Input(Bias) is [%d]",
-                          bias->dims().size()));
+                          bias.dims().size()));
   }
-  if (config.is_runtime && bias.get_ptr() != nullptr) {
+  if (config.is_runtime && bias) {
     PADDLE_ENFORCE_EQ(
-        bias->dims()[0],
+        bias.dims()[0],
         right,
         phi::errors::InvalidArgument(
             "The first dimension value of Input(Bias) must equal to be the"
@@ -430,7 +428,7 @@
             "But received the first dimension value of Input(Bias) is"
             "[%d], the second dimension value of the flattened 2D matrix of"
             " Input(Bias) is [%d].",
-            bias->dims()[0],
+            bias.dims()[0],
             right));
   }
 
@@ -445,19 +443,19 @@
 }
 
 void LayerNormGradInferMeta(const MetaTensor& x,
-                            paddle::optional<const MetaTensor&> y,
-                            paddle::optional<const MetaTensor&> z,
+                            const MetaTensor& y,
+                            const MetaTensor& z,
                             MetaTensor* dx,
                             MetaTensor* dy,
                             MetaTensor* dz) {
   if (dx) {
     dx->share_meta(x);
   }
-  if (dy && (y.get_ptr() != nullptr)) {
-    dy->share_meta(*y.get_ptr());
+  if (dy && y) {
+    dy->share_meta(y);
   }
-  if (dz && (z.get_ptr() != nullptr)) {
-    dz->share_meta(*z.get_ptr());
+  if (dz && z) {
+    dz->share_meta(z);
   }
 }
 
@@ -517,7 +515,7 @@ void LinspaceInferMeta(const MetaTensor& start,
 
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
-                         paddle::optional<const MetaTensor&> weight,
+                         const MetaTensor& weight,
                          int64_t ignore_index,
                          const std::string& reduction,
                          MetaTensor* out,
@@ -542,8 +540,8 @@ void NllLossRawInferMeta(const MetaTensor& input,
                         " batch_size is [%s].",
                         x_dims[0],
                         label_dims[0]));
-  if (weight.get_ptr() != nullptr) {
-    auto w_dims = weight->dims();
+  if (weight) {
+    auto w_dims = weight.dims();
     PADDLE_ENFORCE_EQ(
         w_dims.size(),
         1,
@@ -607,7 +605,7 @@ void PutAlongAxisInferMeta(const MetaTensor& x,
 
 void RoiAlignInferMeta(const MetaTensor& x,
                        const MetaTensor& boxes,
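Note: on the kernel side (the paddle/phi/kernels files further down) the optional
wrapper is kept, but the parameter changes from a by-value
paddle::optional<const DenseTensor&> to a const paddle::optional<DenseTensor>&.
A minimal consumption sketch (FooKernel is hypothetical; paddle::optional is
assumed to expose the boolean conversion and get_ptr() used throughout this patch):

    // Sketch only: hypothetical kernel with the new optional parameter.
    template <typename T, typename Context>
    void FooKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const paddle::optional<DenseTensor>& bias,
                   DenseTensor* out) {
      // get_ptr() yields nullptr when no tensor was passed in.
      const DenseTensor* bias_ptr = bias.get_ptr();
      if (bias_ptr) {
        // read *bias_ptr alongside x ...
      }
    }

-                       paddle::optional<const MetaTensor&>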
boxes_num, + const MetaTensor& boxes_num, int pooled_height, int pooled_width, float spatial_scale, @@ -619,7 +617,7 @@ void RoiAlignInferMeta(const MetaTensor& x, auto boxes_dims = boxes.dims(); if (boxes_num) { - auto boxes_num_dims = boxes_num->dims(); + auto boxes_num_dims = boxes_num.dims(); PADDLE_ENFORCE_EQ( boxes_num_dims.size(), 1, @@ -684,7 +682,7 @@ void RoiAlignInferMeta(const MetaTensor& x, void RoiPoolInferMeta(const MetaTensor& x, const MetaTensor& boxes, - paddle::optional boxes_num, + const MetaTensor& boxes_num, int pooled_height, int pooled_width, float spatial_scale, @@ -694,7 +692,7 @@ void RoiPoolInferMeta(const MetaTensor& x, auto boxes_dims = boxes.dims(); if (boxes_num) { - auto boxes_num_dims = boxes_num->dims(); + auto boxes_num_dims = boxes_num.dims(); PADDLE_ENFORCE_EQ( boxes_num_dims.size(), 1, diff --git a/paddle/phi/infermeta/ternary.h b/paddle/phi/infermeta/ternary.h index b2fb30a4da2..760011ad829 100644 --- a/paddle/phi/infermeta/ternary.h +++ b/paddle/phi/infermeta/ternary.h @@ -53,8 +53,8 @@ void ArangeInferMeta(const MetaTensor& start, MetaTensor* out); void InstanceNormInferMeta(const MetaTensor& x, - paddle::optional scale, - paddle::optional bias, + const MetaTensor& scale, + const MetaTensor& bias, float epsilon, MetaTensor* y, MetaTensor* saved_mean, @@ -70,8 +70,8 @@ void GraphSendRecvInferMeta(const MetaTensor& x, MetaTensor* dst_count); void LayerNormInferMeta(const MetaTensor& x, - paddle::optional scale, - paddle::optional bias, + const MetaTensor& scale, + const MetaTensor& bias, float epsilon, int begin_norm_axis, bool is_test, @@ -81,8 +81,8 @@ void LayerNormInferMeta(const MetaTensor& x, MetaConfig config = MetaConfig()); void LayerNormGradInferMeta(const MetaTensor& x, - paddle::optional y, - paddle::optional z, + const MetaTensor& y, + const MetaTensor& z, MetaTensor* dx, MetaTensor* dy, MetaTensor* dz); @@ -105,7 +105,7 @@ void LinspaceInferMeta(const MetaTensor& start, void NllLossRawInferMeta(const MetaTensor& input, const MetaTensor& label, - paddle::optional weight, + const MetaTensor& weight, int64_t ignore_index, const std::string& reduction, MetaTensor* out, @@ -121,7 +121,7 @@ void PutAlongAxisInferMeta(const MetaTensor& x, void RoiAlignInferMeta(const MetaTensor& x, const MetaTensor& boxes, - paddle::optional boxes_num, + const MetaTensor& boxes_num, int pooled_height, int pooled_width, float spatial_scale, @@ -132,7 +132,7 @@ void RoiAlignInferMeta(const MetaTensor& x, void RoiPoolInferMeta(const MetaTensor& x, const MetaTensor& boxes, - paddle::optional boxes_num, + const MetaTensor& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/activation_grad_kernel.h b/paddle/phi/kernels/activation_grad_kernel.h index 5d7af6cca94..8e63a0fd22a 100644 --- a/paddle/phi/kernels/activation_grad_kernel.h +++ b/paddle/phi/kernels/activation_grad_kernel.h @@ -137,7 +137,7 @@ void SigmoidTripleGradKernel(const Context& dev_ctx, const DenseTensor& dout, const DenseTensor& ddx, const DenseTensor& d_dout_new, - paddle::optional d_ddout, + const paddle::optional& d_ddout, DenseTensor* d_out_new, DenseTensor* d_dout, DenseTensor* d_ddx); diff --git a/paddle/phi/kernels/adam_kernel.h b/paddle/phi/kernels/adam_kernel.h index f144d40d2b6..0bdf05f8e51 100644 --- a/paddle/phi/kernels/adam_kernel.h +++ b/paddle/phi/kernels/adam_kernel.h @@ -28,8 +28,8 @@ void AdamDenseKernel(const Context& dev_ctx, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional 
master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/adamw_kernel.h b/paddle/phi/kernels/adamw_kernel.h index d7b072adda4..5cbb38143ff 100644 --- a/paddle/phi/kernels/adamw_kernel.h +++ b/paddle/phi/kernels/adamw_kernel.h @@ -28,8 +28,8 @@ void AdamwDenseKernel(const Context& dev_ctx, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/assign_kernel.cc b/paddle/phi/kernels/assign_kernel.cc index 720ebb5b78c..5ed9d72a503 100644 --- a/paddle/phi/kernels/assign_kernel.cc +++ b/paddle/phi/kernels/assign_kernel.cc @@ -31,7 +31,7 @@ void AssignKernel(const Context& dev_ctx, template void AssignRawKernel(const Context& dev_ctx, - paddle::optional x, + const paddle::optional& x, DenseTensor* out) { if (x) { if (!x->IsInitialized()) { diff --git a/paddle/phi/kernels/assign_kernel.h b/paddle/phi/kernels/assign_kernel.h index 6881ac9f0ee..0294dc950de 100644 --- a/paddle/phi/kernels/assign_kernel.h +++ b/paddle/phi/kernels/assign_kernel.h @@ -31,7 +31,7 @@ void AssignKernel(const Context& dev_ctx, // this looks weird template void AssignRawKernel(const Context& dev_ctx, - paddle::optional x, + const paddle::optional& x, DenseTensor* out); template diff --git a/paddle/phi/kernels/batch_norm_grad_kernel.h b/paddle/phi/kernels/batch_norm_grad_kernel.h index 2cb3b16a022..3de2f69f452 100644 --- a/paddle/phi/kernels/batch_norm_grad_kernel.h +++ b/paddle/phi/kernels/batch_norm_grad_kernel.h @@ -24,11 +24,11 @@ void BatchNormGradRawKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& scale, const DenseTensor& bias, - paddle::optional mean, - paddle::optional variance, + const paddle::optional& mean, + const paddle::optional& variance, const DenseTensor& saved_mean, const DenseTensor& saved_variance, - paddle::optional reserve_space, + const paddle::optional& reserve_space, const DenseTensor& y_grad, float momentum, float epsilon, @@ -47,11 +47,11 @@ void BatchNormGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& scale, const DenseTensor& bias, - paddle::optional mean, - paddle::optional variance, + const paddle::optional& mean, + const paddle::optional& variance, const DenseTensor& saved_mean, const DenseTensor& saved_variance, - paddle::optional reserve_space, + const paddle::optional& reserve_space, const DenseTensor& y_grad, float momentum, float epsilon, @@ -68,8 +68,8 @@ template void BatchNormDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& scale, - paddle::optional mean, - paddle::optional variance, + const paddle::optional& mean, + const paddle::optional& variance, const DenseTensor& saved_mean, const DenseTensor& saved_variance, const DenseTensor& y_grad, diff --git a/paddle/phi/kernels/bilinear_tensor_product_kernel.h b/paddle/phi/kernels/bilinear_tensor_product_kernel.h index b34e8946ddd..bd01ed94868 100644 --- a/paddle/phi/kernels/bilinear_tensor_product_kernel.h +++ b/paddle/phi/kernels/bilinear_tensor_product_kernel.h @@ -24,7 +24,7 @@ void BilinearTensorProductKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& weight, - paddle::optional 
bias, + const paddle::optional& bias, DenseTensor* out); } // namespace phi diff --git a/paddle/phi/kernels/bincount_kernel.h b/paddle/phi/kernels/bincount_kernel.h index 3ba69d36548..e110b6e014b 100644 --- a/paddle/phi/kernels/bincount_kernel.h +++ b/paddle/phi/kernels/bincount_kernel.h @@ -21,7 +21,7 @@ namespace phi { template void BincountKernel(const Context& dev_ctx, const DenseTensor& x, - const paddle::optional weights, + const paddle::optional& weights, int minlength, DenseTensor* out); diff --git a/paddle/phi/kernels/conv_grad_grad_kernel.h b/paddle/phi/kernels/conv_grad_grad_kernel.h index 0a359d778a6..799c8721c3c 100644 --- a/paddle/phi/kernels/conv_grad_grad_kernel.h +++ b/paddle/phi/kernels/conv_grad_grad_kernel.h @@ -23,8 +23,8 @@ void ConvGradGradKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const std::vector& strides, const std::vector& paddings, const std::string& paddding_algorithm, @@ -40,8 +40,8 @@ void ConvGradGradKernel(const Context& dev_ctx, template void Conv3DGradGradKernel(const Context& dev_ctx, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, diff --git a/paddle/phi/kernels/cpu/adam_kernel.cc b/paddle/phi/kernels/cpu/adam_kernel.cc index 1e0f5c4df9f..339d690310f 100644 --- a/paddle/phi/kernels/cpu/adam_kernel.cc +++ b/paddle/phi/kernels/cpu/adam_kernel.cc @@ -36,8 +36,8 @@ void AdamDenseKernel(const Context& dev_ctx, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/cpu/adamw_kernel.cc b/paddle/phi/kernels/cpu/adamw_kernel.cc index f2c98fded4d..93092133291 100644 --- a/paddle/phi/kernels/cpu/adamw_kernel.cc +++ b/paddle/phi/kernels/cpu/adamw_kernel.cc @@ -35,8 +35,8 @@ void AdamwDenseKernel(const Context& dev_ctx, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc index bf01c24f4ff..366a08e59fe 100644 --- a/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc @@ -41,11 +41,11 @@ void BatchNormGradRawKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& scale, const DenseTensor& bias, - paddle::optional mean, - paddle::optional variance, + const paddle::optional& mean, + const paddle::optional& variance, const DenseTensor& saved_mean, const DenseTensor& saved_variance, - paddle::optional reserve_space, + const paddle::optional& reserve_space, const DenseTensor& y_grad, float momentum, float epsilon, @@ -300,11 +300,11 @@ void BatchNormGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& scale, const DenseTensor& bias, - paddle::optional 
mean, - paddle::optional variance, + const paddle::optional& mean, + const paddle::optional& variance, const DenseTensor& saved_mean, const DenseTensor& saved_variance, - paddle::optional reserve_space, + const paddle::optional& reserve_space, const DenseTensor& y_grad, float momentum, float epsilon, @@ -343,8 +343,8 @@ template void BatchNormDoubleGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& scale, - paddle::optional mean, - paddle::optional variance, + const paddle::optional& mean, + const paddle::optional& variance, const DenseTensor& saved_mean, const DenseTensor& saved_variance, const DenseTensor& y_grad, diff --git a/paddle/phi/kernels/cpu/bincount_kernel.cc b/paddle/phi/kernels/cpu/bincount_kernel.cc index c9dc44c1e04..8163953c1e0 100644 --- a/paddle/phi/kernels/cpu/bincount_kernel.cc +++ b/paddle/phi/kernels/cpu/bincount_kernel.cc @@ -23,7 +23,7 @@ namespace phi { template void BincountInner(const Context& dev_ctx, const DenseTensor& x, - const paddle::optional weights, + const paddle::optional& weights, int minlength, DenseTensor* out) { const DenseTensor* input = &x; @@ -85,7 +85,7 @@ void BincountInner(const Context& dev_ctx, template void BincountKernel(const Context& dev_ctx, const DenseTensor& x, - const paddle::optional weights, + const paddle::optional& weights, int minlength, DenseTensor* out) { if (x.dtype() == DataType::INT32) { diff --git a/paddle/phi/kernels/cpu/conv_grad_grad_kernel.cc b/paddle/phi/kernels/cpu/conv_grad_grad_kernel.cc index 4966c998dd3..c52f2614150 100644 --- a/paddle/phi/kernels/cpu/conv_grad_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/conv_grad_grad_kernel.cc @@ -21,8 +21,8 @@ namespace phi { template void Conv3DGradGradKernel(const Context& ctx, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, diff --git a/paddle/phi/kernels/cpu/dropout_kernel.cc b/paddle/phi/kernels/cpu/dropout_kernel.cc index c00aedef8c6..fa12e505e42 100644 --- a/paddle/phi/kernels/cpu/dropout_kernel.cc +++ b/paddle/phi/kernels/cpu/dropout_kernel.cc @@ -23,7 +23,7 @@ namespace phi { template void DropoutRawKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional seed_tensor, + const paddle::optional& seed_tensor, float p, bool is_test, const std::string& mode, diff --git a/paddle/phi/kernels/cpu/elementwise_add_grad_kernel.cc b/paddle/phi/kernels/cpu/elementwise_add_grad_kernel.cc index f8a89b997b4..434866b840c 100644 --- a/paddle/phi/kernels/cpu/elementwise_add_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/elementwise_add_grad_kernel.cc @@ -63,8 +63,8 @@ template void AddDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* ddout) { phi::AddDoubleGradImpl(dev_ctx, y, ddx, ddy, dout, axis, ddout); diff --git a/paddle/phi/kernels/cpu/elementwise_subtract_grad_kernel.cc b/paddle/phi/kernels/cpu/elementwise_subtract_grad_kernel.cc index b86ead04dbc..03bb47aaa97 100644 --- a/paddle/phi/kernels/cpu/elementwise_subtract_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/elementwise_subtract_grad_kernel.cc @@ -39,8 +39,8 @@ template void SubtractDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const 
paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* ddout) { phi::SubtractDoubleGradImpl(dev_ctx, y, ddx, ddy, dout, axis, ddout); diff --git a/paddle/phi/kernels/cpu/graph_reindex_kernel.cc b/paddle/phi/kernels/cpu/graph_reindex_kernel.cc index d6454b47964..c0a88f32227 100644 --- a/paddle/phi/kernels/cpu/graph_reindex_kernel.cc +++ b/paddle/phi/kernels/cpu/graph_reindex_kernel.cc @@ -27,8 +27,8 @@ void GraphReindexKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& neighbors, const DenseTensor& count, - paddle::optional hashtable_value, - paddle::optional hashtable_index, + const paddle::optional& hashtable_value, + const paddle::optional& hashtable_index, bool flag_buffer_hashtable, DenseTensor* reindex_src, DenseTensor* reindex_dst, diff --git a/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc b/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc index b4321a85ab2..70aac053417 100644 --- a/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc +++ b/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc @@ -167,8 +167,8 @@ void GraphSampleNeighborsKernel( const DenseTensor& row, const DenseTensor& col_ptr, const DenseTensor& x, - paddle::optional eids, - paddle::optional perm_buffer, + const paddle::optional& eids, + const paddle::optional& perm_buffer, int sample_size, bool return_eids, bool flag_perm_buffer, diff --git a/paddle/phi/kernels/cpu/graph_send_recv_grad_kernel.cc b/paddle/phi/kernels/cpu/graph_send_recv_grad_kernel.cc index 95eeb64afea..6ea65d005c1 100644 --- a/paddle/phi/kernels/cpu/graph_send_recv_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/graph_send_recv_grad_kernel.cc @@ -121,8 +121,8 @@ void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, - paddle::optional out, - paddle::optional dst_count, + const paddle::optional& out, + const paddle::optional& dst_count, const DenseTensor& out_grad, const std::string& pool_type, DenseTensor* x_grad) { diff --git a/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h b/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h index cc67f8e7f21..9b38095f25f 100644 --- a/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h +++ b/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h @@ -31,9 +31,9 @@ void HierarchicalSigmoidGradKernelImpl( const DenseTensor& x, const DenseTensor& w, const DenseTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const paddle::optional& path, + const paddle::optional& code, + const paddle::optional& bias, const DenseTensor& pre_out, const DenseTensor& out_grad, int num_classes, diff --git a/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc b/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc index 9edc9f87d4b..eee4525293f 100644 --- a/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc @@ -25,9 +25,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& w, const DenseTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const paddle::optional& path, + const paddle::optional& code, + const paddle::optional& bias, const DenseTensor& pre_out, const DenseTensor& out_grad, int num_classes, diff --git a/paddle/phi/kernels/cpu/hierarchical_sigmoid_kernel.cc b/paddle/phi/kernels/cpu/hierarchical_sigmoid_kernel.cc index 4c4f1aa125a..7c3421e88d4 100644 --- 
a/paddle/phi/kernels/cpu/hierarchical_sigmoid_kernel.cc +++ b/paddle/phi/kernels/cpu/hierarchical_sigmoid_kernel.cc @@ -32,9 +32,9 @@ void HierarchicalSigmoidKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& w, const DenseTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const paddle::optional& path, + const paddle::optional& code, + const paddle::optional& bias, int num_classes, bool remote_prefetch, int trainer_id, diff --git a/paddle/phi/kernels/cpu/instance_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/instance_norm_grad_kernel.cc index dcb4289ae8d..340d2907a79 100644 --- a/paddle/phi/kernels/cpu/instance_norm_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/instance_norm_grad_kernel.cc @@ -43,7 +43,7 @@ template void InstanceNormGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& d_y, - paddle::optional scale, + const paddle::optional& scale, const DenseTensor& saved_mean, const DenseTensor& saved_variance, float epsilon, @@ -153,13 +153,13 @@ void InstanceNormGradKernel(const Context& dev_ctx, template void InstanceNormDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional scale, + const paddle::optional& scale, const DenseTensor& saved_mean, const DenseTensor& saved_variance, const DenseTensor& dy, - paddle::optional ddx, - paddle::optional ddscale, - paddle::optional ddbias, + const paddle::optional& ddx, + const paddle::optional& ddscale, + const paddle::optional& ddbias, float epsilon, DenseTensor* dx, DenseTensor* dscale, diff --git a/paddle/phi/kernels/cpu/instance_norm_kernel.cc b/paddle/phi/kernels/cpu/instance_norm_kernel.cc index f89ecba901c..5eac473effa 100644 --- a/paddle/phi/kernels/cpu/instance_norm_kernel.cc +++ b/paddle/phi/kernels/cpu/instance_norm_kernel.cc @@ -30,8 +30,8 @@ namespace phi { template void InstanceNormKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional scale, - paddle::optional bias, + const paddle::optional& scale, + const paddle::optional& bias, float epsilon_f, DenseTensor* y, DenseTensor* saved_mean, diff --git a/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc b/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc index 550439a5251..d4e13aa3b24 100644 --- a/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc @@ -361,9 +361,9 @@ template static void Interpolate1DCPUBwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& data_layout_str, int out_w, @@ -459,9 +459,9 @@ template static void Interpolate2DCPUBwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& data_layout_str, int out_h, @@ -619,9 +619,9 @@ template static void Interpolate3DCPUBwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& 
data_layout_str, int out_d, @@ -800,9 +800,9 @@ template void InterpolateGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& data_layout, int out_d, @@ -867,9 +867,9 @@ template void BilinearInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -901,9 +901,9 @@ template void NearestInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -935,9 +935,9 @@ template void TrilinearInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -969,9 +969,9 @@ template void LinearInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -1003,9 +1003,9 @@ template void BicubicInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, diff --git a/paddle/phi/kernels/cpu/interpolate_kernel.cc b/paddle/phi/kernels/cpu/interpolate_kernel.cc index da9a54748f0..5259a770568 100644 --- a/paddle/phi/kernels/cpu/interpolate_kernel.cc +++ b/paddle/phi/kernels/cpu/interpolate_kernel.cc @@ -504,9 +504,9 @@ template static void Interpolate1DCPUFwd( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout_str, int out_w, const std::vector& scale, @@ -603,9 +603,9 @@ template static void Interpolate2DCPUFwd( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout_str, int out_h, int out_w, @@ -770,9 +770,9 @@ template static void Interpolate3DCPUFwd( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - 
paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout_str, int out_d, int out_h, @@ -966,9 +966,9 @@ template void InterpolateKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1029,9 +1029,9 @@ template void BilinearInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1061,9 +1061,9 @@ template void NearestInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1093,9 +1093,9 @@ template void TrilinearInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1125,9 +1125,9 @@ template void LinearInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1157,9 +1157,9 @@ template void BicubicInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, diff --git a/paddle/phi/kernels/cpu/label_smooth_kernel.cc b/paddle/phi/kernels/cpu/label_smooth_kernel.cc index c76fb826cdf..af9548e8186 100644 --- a/paddle/phi/kernels/cpu/label_smooth_kernel.cc +++ b/paddle/phi/kernels/cpu/label_smooth_kernel.cc @@ -22,7 +22,7 @@ namespace phi { template void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, - paddle::optional prior_dist, + const paddle::optional& prior_dist, float epsilon, DenseTensor* out) { auto label_dim = label.dims()[label.dims().size() - 1]; diff --git a/paddle/phi/kernels/cpu/layer_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/layer_norm_grad_kernel.cc index 7c1b33f047b..a30f54fd4b6 100644 --- a/paddle/phi/kernels/cpu/layer_norm_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/layer_norm_grad_kernel.cc @@ -32,8 +32,8 @@ namespace phi { template void LayerNormGradKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional scale_opt, - paddle::optional bias_opt, + const paddle::optional& scale_opt, + const paddle::optional& bias_opt, const DenseTensor& mean, const DenseTensor& variance, const DenseTensor& out_grad, diff --git a/paddle/phi/kernels/cpu/layer_norm_kernel.cc 
b/paddle/phi/kernels/cpu/layer_norm_kernel.cc index 5b09d68c7ca..52722468e16 100644 --- a/paddle/phi/kernels/cpu/layer_norm_kernel.cc +++ b/paddle/phi/kernels/cpu/layer_norm_kernel.cc @@ -30,8 +30,8 @@ namespace phi { template void LayerNormKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional scale_opt, - paddle::optional bias_opt, + const paddle::optional& scale_opt, + const paddle::optional& bias_opt, float epsilon, int begin_norm_axis, bool is_test, diff --git a/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc b/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc index 5b859b6ec27..dd2b09ee39a 100644 --- a/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc @@ -121,7 +121,7 @@ template void NllLossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& labels, - paddle::optional weight, + const paddle::optional& weight, const DenseTensor& total_weight, const DenseTensor& d_out, int64_t ignore_index, diff --git a/paddle/phi/kernels/cpu/nll_loss_kernel.cc b/paddle/phi/kernels/cpu/nll_loss_kernel.cc index 334b0082bde..92cb6a1ad17 100644 --- a/paddle/phi/kernels/cpu/nll_loss_kernel.cc +++ b/paddle/phi/kernels/cpu/nll_loss_kernel.cc @@ -154,7 +154,7 @@ template void NllLossRawKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& labels, - paddle::optional weight, + const paddle::optional& weight, int64_t ignore_index, const std::string& reduction, DenseTensor* out, diff --git a/paddle/phi/kernels/cpu/psroi_pool_grad_kernel.cc b/paddle/phi/kernels/cpu/psroi_pool_grad_kernel.cc index 715e6b008ed..b68c3ad545d 100644 --- a/paddle/phi/kernels/cpu/psroi_pool_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/psroi_pool_grad_kernel.cc @@ -24,7 +24,7 @@ template void PsroiPoolGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, - paddle::optional rois_num, + const paddle::optional& rois_num, const DenseTensor& dout, int pooled_height, int pooled_width, diff --git a/paddle/phi/kernels/cpu/psroi_pool_kernel.cc b/paddle/phi/kernels/cpu/psroi_pool_kernel.cc index 06cd03395d9..4f7925ad00f 100644 --- a/paddle/phi/kernels/cpu/psroi_pool_kernel.cc +++ b/paddle/phi/kernels/cpu/psroi_pool_kernel.cc @@ -23,7 +23,7 @@ template void PsroiPoolKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, - paddle::optional rois_num, + const paddle::optional& rois_num, int pooled_height, int pooled_width, int output_channels, diff --git a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc index 9b5e5cb5443..4dd1894320a 100644 --- a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc @@ -969,7 +969,7 @@ void RnnGradFunc(const CPUContext& dev_ctx, const DenseTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const paddle::optional& sequence_length, const DenseTensor& out, const DenseTensor& dropout_state, const DenseTensor& reserve, @@ -1244,7 +1244,7 @@ void RnnGradKernel(const Context& dev_ctx, const DenseTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const paddle::optional& sequence_length, const DenseTensor& out, const DenseTensor& dropout_state, const DenseTensor& reserve, diff --git a/paddle/phi/kernels/cpu/rnn_kernel.cc b/paddle/phi/kernels/cpu/rnn_kernel.cc index ae2c7a72635..80c521918ed 100644 --- a/paddle/phi/kernels/cpu/rnn_kernel.cc +++ b/paddle/phi/kernels/cpu/rnn_kernel.cc @@ -819,7 
+819,7 @@ void RnnKernel(const Context& dev_ctx, const DenseTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const paddle::optional& sequence_length, float dropout_prob, bool is_bidirec, int input_size, diff --git a/paddle/phi/kernels/cpu/roi_align_grad_kernel.cc b/paddle/phi/kernels/cpu/roi_align_grad_kernel.cc index a91b8b6c1fc..ea01121509f 100644 --- a/paddle/phi/kernels/cpu/roi_align_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/roi_align_grad_kernel.cc @@ -73,7 +73,7 @@ template void RoiAlignGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, const DenseTensor& out_grad, int pooled_height, int pooled_width, diff --git a/paddle/phi/kernels/cpu/roi_align_kernel.cc b/paddle/phi/kernels/cpu/roi_align_kernel.cc index 4752a9b3a48..cd779b72e7a 100644 --- a/paddle/phi/kernels/cpu/roi_align_kernel.cc +++ b/paddle/phi/kernels/cpu/roi_align_kernel.cc @@ -182,7 +182,7 @@ template void RoiAlignKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/cpu/roi_pool_grad_kernel.cc b/paddle/phi/kernels/cpu/roi_pool_grad_kernel.cc index 0eaa873590e..f2fcfa5648d 100644 --- a/paddle/phi/kernels/cpu/roi_pool_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/roi_pool_grad_kernel.cc @@ -25,7 +25,7 @@ template void RoiPoolGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, const DenseTensor& arg_max, const DenseTensor& out_grad, int pooled_height, diff --git a/paddle/phi/kernels/cpu/roi_pool_kernel.cc b/paddle/phi/kernels/cpu/roi_pool_kernel.cc index 02020354cd3..e088e9a2831 100644 --- a/paddle/phi/kernels/cpu/roi_pool_kernel.cc +++ b/paddle/phi/kernels/cpu/roi_pool_kernel.cc @@ -24,7 +24,7 @@ template void RoiPoolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/cpu/sgd_kernel.cc b/paddle/phi/kernels/cpu/sgd_kernel.cc index c7b4074c70a..214fd82bef3 100644 --- a/paddle/phi/kernels/cpu/sgd_kernel.cc +++ b/paddle/phi/kernels/cpu/sgd_kernel.cc @@ -118,7 +118,7 @@ void SGDDenseKernel(const Context& dev_ctx, const DenseTensor& param, const DenseTensor& learning_rate, const DenseTensor& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision, DenseTensor* param_out, DenseTensor* master_param_out) { @@ -132,7 +132,7 @@ void SGDDenseParamSparseGradKernel( const DenseTensor& param, const DenseTensor& learning_rate, const SelectedRows& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision, DenseTensor* param_out, DenseTensor* master_param_out) { @@ -146,7 +146,7 @@ void SGDSparseParamSparseGradKernel( const SelectedRows& param, const DenseTensor& learning_rate, const SelectedRows& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision, SelectedRows* param_out, SelectedRows* master_param_out) { diff --git a/paddle/phi/kernels/cpu/yolov3_loss_grad_kernel.cc b/paddle/phi/kernels/cpu/yolov3_loss_grad_kernel.cc index acd9a99cef4..383009229f9 100644 --- 
a/paddle/phi/kernels/cpu/yolov3_loss_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/yolov3_loss_grad_kernel.cc @@ -121,7 +121,7 @@ void Yolov3LossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& gt_box, const DenseTensor& gt_label, - paddle::optional gt_score, + const paddle::optional& gt_score, const DenseTensor& loss_grad, const DenseTensor& objectness_mask, const DenseTensor& gt_match_mask, diff --git a/paddle/phi/kernels/cpu/yolov3_loss_kernel.cc b/paddle/phi/kernels/cpu/yolov3_loss_kernel.cc index 6df910eea02..8a190ab25a7 100644 --- a/paddle/phi/kernels/cpu/yolov3_loss_kernel.cc +++ b/paddle/phi/kernels/cpu/yolov3_loss_kernel.cc @@ -182,7 +182,7 @@ void Yolov3LossKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& gt_box, const DenseTensor& gt_label, - paddle::optional gt_score, + const paddle::optional& gt_score, const std::vector& anchors, const std::vector& anchor_mask, int class_num, diff --git a/paddle/phi/kernels/deformable_conv_grad_kernel.h b/paddle/phi/kernels/deformable_conv_grad_kernel.h index 85786cec4c3..04fe7904a45 100644 --- a/paddle/phi/kernels/deformable_conv_grad_kernel.h +++ b/paddle/phi/kernels/deformable_conv_grad_kernel.h @@ -23,7 +23,7 @@ void DeformableConvGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& offset, const DenseTensor& filter, - paddle::optional mask, + const paddle::optional& mask, const DenseTensor& out_grad, const std::vector& strides, const std::vector& paddings, diff --git a/paddle/phi/kernels/deformable_conv_kernel.h b/paddle/phi/kernels/deformable_conv_kernel.h index fbbe5f62c6a..7b66e506b89 100644 --- a/paddle/phi/kernels/deformable_conv_kernel.h +++ b/paddle/phi/kernels/deformable_conv_kernel.h @@ -24,7 +24,7 @@ void DeformableConvKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& offset, const DenseTensor& filter, - paddle::optional mask, + const paddle::optional& mask, const std::vector& strides, const std::vector& paddings, const std::vector& dilations, diff --git a/paddle/phi/kernels/dropout_kernel.h b/paddle/phi/kernels/dropout_kernel.h index dc9f89e08e1..6febcd78e11 100644 --- a/paddle/phi/kernels/dropout_kernel.h +++ b/paddle/phi/kernels/dropout_kernel.h @@ -22,7 +22,7 @@ namespace phi { template void DropoutRawKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional seed_tensor, + const paddle::optional& seed_tensor, float p, bool is_test, const std::string& mode, diff --git a/paddle/phi/kernels/elementwise_add_grad_kernel.h b/paddle/phi/kernels/elementwise_add_grad_kernel.h index 9b754cfefe3..8fc31c8878b 100644 --- a/paddle/phi/kernels/elementwise_add_grad_kernel.h +++ b/paddle/phi/kernels/elementwise_add_grad_kernel.h @@ -32,8 +32,8 @@ template void AddDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* ddout); diff --git a/paddle/phi/kernels/elementwise_divide_grad_kernel.h b/paddle/phi/kernels/elementwise_divide_grad_kernel.h index 6d29dae99a1..c764f05c398 100644 --- a/paddle/phi/kernels/elementwise_divide_grad_kernel.h +++ b/paddle/phi/kernels/elementwise_divide_grad_kernel.h @@ -34,8 +34,8 @@ void DivideDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& out, const DenseTensor& dx, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* dy, 
DenseTensor* dout, diff --git a/paddle/phi/kernels/elementwise_multiply_grad_kernel.h b/paddle/phi/kernels/elementwise_multiply_grad_kernel.h index 517948a50d1..9cbd5040666 100644 --- a/paddle/phi/kernels/elementwise_multiply_grad_kernel.h +++ b/paddle/phi/kernels/elementwise_multiply_grad_kernel.h @@ -33,8 +33,8 @@ void MultiplyDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* dx, DenseTensor* dy, @@ -45,11 +45,11 @@ void MultiplyTripleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, const DenseTensor& d_dx, const DenseTensor& d_dy, - paddle::optional d_ddout, + const paddle::optional& d_ddout, int axis, DenseTensor* d_x, DenseTensor* d_y, diff --git a/paddle/phi/kernels/elementwise_subtract_grad_kernel.h b/paddle/phi/kernels/elementwise_subtract_grad_kernel.h index 97df769f4d0..536d859b46a 100644 --- a/paddle/phi/kernels/elementwise_subtract_grad_kernel.h +++ b/paddle/phi/kernels/elementwise_subtract_grad_kernel.h @@ -31,8 +31,8 @@ template void SubtractDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* ddout); diff --git a/paddle/phi/kernels/expand_as_kernel.h b/paddle/phi/kernels/expand_as_kernel.h index 971ea32310f..6bc6c73e737 100644 --- a/paddle/phi/kernels/expand_as_kernel.h +++ b/paddle/phi/kernels/expand_as_kernel.h @@ -21,7 +21,7 @@ namespace phi { template void ExpandAsKernel(const Context& ctx, const DenseTensor& x, - paddle::optional y, + const paddle::optional& y, const std::vector& target_shape, DenseTensor* out); diff --git a/paddle/phi/kernels/funcs/pooling.cu b/paddle/phi/kernels/funcs/pooling.cu index 417c1cd2347..b0e68abc08a 100644 --- a/paddle/phi/kernels/funcs/pooling.cu +++ b/paddle/phi/kernels/funcs/pooling.cu @@ -170,7 +170,7 @@ template __global__ void KernelPool2DGrad(const int nthreads, const T* __restrict__ input_data, const T* __restrict__ output_data, - const const T* __restrict__ output_grad, + const T* __restrict__ output_grad, const int output_width, const int output_height, const int input_width, diff --git a/paddle/phi/kernels/funcs/segment_pooling.cc b/paddle/phi/kernels/funcs/segment_pooling.cc index fbd744430aa..e6bd3719356 100644 --- a/paddle/phi/kernels/funcs/segment_pooling.cc +++ b/paddle/phi/kernels/funcs/segment_pooling.cc @@ -90,7 +90,7 @@ class SegmentPoolGradFunctor { const DenseTensor& out_grad, const DenseTensor& segments, DenseTensor* in_grad, - paddle::optional index, + const paddle::optional& index, const std::string pooltype = "SUM") { const IndexT* segment_ids = segments.data(); auto& place = *dev_ctx.eigen_device(); diff --git a/paddle/phi/kernels/funcs/segment_pooling.cu b/paddle/phi/kernels/funcs/segment_pooling.cu index 95606b15267..687cccb1f64 100644 --- a/paddle/phi/kernels/funcs/segment_pooling.cu +++ b/paddle/phi/kernels/funcs/segment_pooling.cu @@ -417,7 +417,7 @@ class SegmentPoolGradFunctor { const DenseTensor& out_grad, const DenseTensor& segments, DenseTensor* in_grad, - paddle::optional summed_ids, + const paddle::optional& summed_ids, const std::string pooltype = "SUM") { if (pooltype == "MAX" || 
pooltype == "MIN") { SegmentPoolCUDAGradFunctor( diff --git a/paddle/phi/kernels/funcs/segment_pooling.h b/paddle/phi/kernels/funcs/segment_pooling.h index b8281061582..09da9eb3047 100644 --- a/paddle/phi/kernels/funcs/segment_pooling.h +++ b/paddle/phi/kernels/funcs/segment_pooling.h @@ -41,7 +41,7 @@ class SegmentPoolGradFunctor { const DenseTensor& out_grad, const DenseTensor& segments, DenseTensor* in_grad, - paddle::optional summed_ids, + const paddle::optional& summed_ids, const std::string pooltype = "SUM"); }; diff --git a/paddle/phi/kernels/gpu/adam_kernel.cu b/paddle/phi/kernels/gpu/adam_kernel.cu index 33b6f3a5a1b..449aaae1a4b 100644 --- a/paddle/phi/kernels/gpu/adam_kernel.cu +++ b/paddle/phi/kernels/gpu/adam_kernel.cu @@ -135,8 +135,8 @@ void AdamDenseKernel(const Context& dev_ctx, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/gpu/adamw_kernel.cu b/paddle/phi/kernels/gpu/adamw_kernel.cu index 4873ba9c13d..0fff142567a 100644 --- a/paddle/phi/kernels/gpu/adamw_kernel.cu +++ b/paddle/phi/kernels/gpu/adamw_kernel.cu @@ -146,8 +146,8 @@ void AdamwDenseKernel(const Context& dev_ctx, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu index e808ef644a2..c08fa4eb260 100644 --- a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu @@ -309,11 +309,11 @@ void BatchNormGradRawKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, - paddle::optional mean, - paddle::optional variance, + const paddle::optional &mean, + const paddle::optional &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, - paddle::optional reserve_space, + const paddle::optional &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon_f, @@ -867,11 +867,11 @@ void BatchNormGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, - paddle::optional mean, - paddle::optional variance, + const paddle::optional &mean, + const paddle::optional &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, - paddle::optional reserve_space, + const paddle::optional &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon, @@ -910,8 +910,8 @@ template void BatchNormDoubleGradKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, - paddle::optional mean, - paddle::optional variance, + const paddle::optional &mean, + const paddle::optional &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const DenseTensor &y_grad, diff --git a/paddle/phi/kernels/gpu/bincount_kernel.cu b/paddle/phi/kernels/gpu/bincount_kernel.cu index a4ec894790c..8e60b31c370 100644 --- a/paddle/phi/kernels/gpu/bincount_kernel.cu +++ b/paddle/phi/kernels/gpu/bincount_kernel.cu @@ -49,7 +49,7 @@ __global__ void KernelBincount(const InputT* input, template void 
BincountCUDAInner(const Context& dev_ctx, const DenseTensor& x, - const paddle::optional weights, + const paddle::optional& weights, int minlength, DenseTensor* out) { const DenseTensor* input = &x; @@ -143,7 +143,7 @@ void BincountCUDAInner(const Context& dev_ctx, template void BincountKernel(const Context& dev_ctx, const DenseTensor& x, - const paddle::optional weights, + const paddle::optional& weights, int minlength, DenseTensor* out) { if (x.dtype() == DataType::INT32) { diff --git a/paddle/phi/kernels/gpu/dropout_kernel.cu b/paddle/phi/kernels/gpu/dropout_kernel.cu index bd1683ad0c7..fae0e8cb25b 100644 --- a/paddle/phi/kernels/gpu/dropout_kernel.cu +++ b/paddle/phi/kernels/gpu/dropout_kernel.cu @@ -23,7 +23,7 @@ namespace phi { template void DropoutRawKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional seed_tensor, + const paddle::optional& seed_tensor, float p, bool is_test, const std::string& mode, diff --git a/paddle/phi/kernels/gpu/elementwise_add_grad_kernel.cu b/paddle/phi/kernels/gpu/elementwise_add_grad_kernel.cu index 8dd4d0184c2..517fbcba158 100644 --- a/paddle/phi/kernels/gpu/elementwise_add_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/elementwise_add_grad_kernel.cu @@ -57,8 +57,8 @@ template void AddDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* ddout) { phi::AddDoubleGradImpl(dev_ctx, y, ddx, ddy, dout, axis, ddout); diff --git a/paddle/phi/kernels/gpu/elementwise_subtract_grad_kernel.cu b/paddle/phi/kernels/gpu/elementwise_subtract_grad_kernel.cu index 017616df278..45e19b98384 100644 --- a/paddle/phi/kernels/gpu/elementwise_subtract_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/elementwise_subtract_grad_kernel.cu @@ -47,8 +47,8 @@ template void SubtractDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* ddout) { phi::SubtractDoubleGradImpl(dev_ctx, y, ddx, ddy, dout, axis, ddout); diff --git a/paddle/phi/kernels/gpu/graph_reindex_kernel.cu b/paddle/phi/kernels/gpu/graph_reindex_kernel.cu index 34bd1d6db77..9869d5a517b 100644 --- a/paddle/phi/kernels/gpu/graph_reindex_kernel.cu +++ b/paddle/phi/kernels/gpu/graph_reindex_kernel.cu @@ -286,8 +286,8 @@ void GraphReindexKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& neighbors, const DenseTensor& count, - paddle::optional hashtable_value, - paddle::optional hashtable_index, + const paddle::optional& hashtable_value, + const paddle::optional& hashtable_index, bool flag_buffer_hashtable, DenseTensor* reindex_src, DenseTensor* reindex_dst, diff --git a/paddle/phi/kernels/gpu/graph_sample_neighbors_kernel.cu b/paddle/phi/kernels/gpu/graph_sample_neighbors_kernel.cu index af616963b49..174495dad34 100644 --- a/paddle/phi/kernels/gpu/graph_sample_neighbors_kernel.cu +++ b/paddle/phi/kernels/gpu/graph_sample_neighbors_kernel.cu @@ -356,8 +356,8 @@ void GraphSampleNeighborsKernel( const DenseTensor& row, const DenseTensor& col_ptr, const DenseTensor& x, - paddle::optional eids, - paddle::optional perm_buffer, + const paddle::optional& eids, + const paddle::optional& perm_buffer, int sample_size, bool return_eids, bool flag_perm_buffer, diff --git a/paddle/phi/kernels/gpu/graph_send_recv_grad_kernel.cu b/paddle/phi/kernels/gpu/graph_send_recv_grad_kernel.cu 
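
`weights` in bincount is a typical optional input: absent means plain integer counts, present means each bin accumulates its weight. A self-contained sketch of that contract (std::vector in place of DenseTensor):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <vector>

    std::vector<float> Bincount(const std::vector<int>& x,
                                const std::optional<std::vector<float>>& weights,
                                int minlength) {
      int bins = minlength;
      for (int v : x) bins = std::max(bins, v + 1);
      std::vector<float> out(bins, 0.f);
      for (std::size_t i = 0; i < x.size(); ++i)
        out[x[i]] += weights ? (*weights)[i] : 1.f;  // weighted vs. plain count
      return out;
    }

    int main() {
      for (float c : Bincount({0, 1, 1, 3}, std::nullopt, 0))
        std::cout << c << " ";  // 1 2 0 1
    }
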
index 2be0caff79d..8743b4e8a74 100644 --- a/paddle/phi/kernels/gpu/graph_send_recv_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/graph_send_recv_grad_kernel.cu @@ -105,8 +105,8 @@ void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, - paddle::optional out, - paddle::optional dst_count, + const paddle::optional& out, + const paddle::optional& dst_count, const DenseTensor& out_grad, const std::string& pool_type, DenseTensor* x_grad) { diff --git a/paddle/phi/kernels/gpu/instance_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/instance_norm_grad_kernel.cu index 387127de48d..b72acc70733 100644 --- a/paddle/phi/kernels/gpu/instance_norm_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/instance_norm_grad_kernel.cu @@ -291,7 +291,7 @@ template void InstanceNormGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &d_y, - paddle::optional scale, + const paddle::optional &scale, const DenseTensor &saved_mean, const DenseTensor &saved_variance, float epsilon_f, @@ -516,13 +516,13 @@ void InstanceNormGradKernel(const Context &dev_ctx, template void InstanceNormDoubleGradKernel(const Context &dev_ctx, const DenseTensor &x, - paddle::optional scale, + const paddle::optional &scale, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const DenseTensor &dy, - paddle::optional ddx, - paddle::optional ddscale, - paddle::optional ddbias, + const paddle::optional &ddx, + const paddle::optional &ddscale, + const paddle::optional &ddbias, float epsilon_f, DenseTensor *dx, DenseTensor *dscale, diff --git a/paddle/phi/kernels/gpu/instance_norm_kernel.cu b/paddle/phi/kernels/gpu/instance_norm_kernel.cu index 81d94007501..b7292236898 100644 --- a/paddle/phi/kernels/gpu/instance_norm_kernel.cu +++ b/paddle/phi/kernels/gpu/instance_norm_kernel.cu @@ -26,8 +26,8 @@ namespace phi { template void InstanceNormKernel(const Context &dev_ctx, const DenseTensor &x, - paddle::optional scale, - paddle::optional bias, + const paddle::optional &scale, + const paddle::optional &bias, float epsilon_f, DenseTensor *y, DenseTensor *saved_mean, diff --git a/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu b/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu index 73334d9c38a..cd0f4e1493e 100644 --- a/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu @@ -747,9 +747,9 @@ template static void Interpolate1DCUDABwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& data_layout_str, int out_w, @@ -861,9 +861,9 @@ template static void Interpolate2DCUDABwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& data_layout_str, int out_h, @@ -1124,9 +1124,9 @@ template static void Interpolate3DCUDABwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const 
DenseTensor& output_grad, const std::string& data_layout_str, int out_d, @@ -1334,9 +1334,9 @@ template void InterpolateGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& output_grad, const std::string& data_layout, int out_d, @@ -1401,9 +1401,9 @@ template void BilinearInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -1435,9 +1435,9 @@ template void NearestInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -1469,9 +1469,9 @@ template void TrilinearInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -1503,9 +1503,9 @@ template void LinearInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, @@ -1537,9 +1537,9 @@ template void BicubicInterpGradKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, diff --git a/paddle/phi/kernels/gpu/interpolate_kernel.cu b/paddle/phi/kernels/gpu/interpolate_kernel.cu index 6e609aa1167..3bd59c80710 100644 --- a/paddle/phi/kernels/gpu/interpolate_kernel.cu +++ b/paddle/phi/kernels/gpu/interpolate_kernel.cu @@ -627,9 +627,9 @@ template static void Interpolate1DCUDAFwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout_str, int out_w, const std::vector& scale, @@ -742,9 +742,9 @@ template static void Interpolate2DCUDAFwd( const Context& dev_ctx, const DenseTensor& input, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout_str, int out_h, int out_w, @@ -997,9 +997,9 @@ template static void Interpolate3DCUDAFwd( const Context& dev_ctx, const DenseTensor& input, - 
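
The interpolate kernels take three optional size inputs: `out_size`, a list of `size_tensor`s, and `scale_tensor`. A sketch of the precedence they resolve, reduced to one dimension (an assumed simplification: the size tensors win over OutSize, which wins over the scale factor; the real kernels also handle NCHW/NHWC and 1D/2D/3D):

    #include <iostream>
    #include <optional>
    #include <vector>

    int ResolveOutW(int in_w,
                    const std::optional<std::vector<int>>& size_tensor,
                    const std::optional<int>& out_size,
                    float scale) {
      if (size_tensor && !size_tensor->empty()) return size_tensor->back();
      if (out_size) return *out_size;
      return static_cast<int>(in_w * scale);  // fall back to the scale factor
    }

    int main() {
      std::cout << ResolveOutW(32, std::nullopt, std::nullopt, 2.f) << "\n";  // 64
      std::cout << ResolveOutW(32, std::nullopt, 48, 2.f) << "\n";            // 48
      std::cout << ResolveOutW(32, std::vector<int>{96}, 48, 2.f) << "\n";    // 96
    }
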
paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout_str, int out_d, int out_h, @@ -1221,9 +1221,9 @@ template void InterpolateKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1283,9 +1283,9 @@ template void BilinearInterpKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1315,9 +1315,9 @@ template void NearestInterpKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1347,9 +1347,9 @@ template void TrilinearInterpKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1379,9 +1379,9 @@ template void LinearInterpKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -1411,9 +1411,9 @@ template void BicubicInterpKernel( const Context& dev_ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, diff --git a/paddle/phi/kernels/gpu/label_smooth_kernel.cu b/paddle/phi/kernels/gpu/label_smooth_kernel.cu index 50f7548450c..bf7ac939eb3 100644 --- a/paddle/phi/kernels/gpu/label_smooth_kernel.cu +++ b/paddle/phi/kernels/gpu/label_smooth_kernel.cu @@ -53,7 +53,7 @@ __global__ void LabelSmoothRunDistKernel(const int N, template void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, - paddle::optional prior_dist, + const paddle::optional& prior_dist, float epsilon, DenseTensor* out) { auto label_dim = label.dims()[label.dims().size() - 1]; diff --git a/paddle/phi/kernels/gpu/layer_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/layer_norm_grad_kernel.cu index 146d307a593..961937441e1 100644 --- a/paddle/phi/kernels/gpu/layer_norm_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/layer_norm_grad_kernel.cu @@ -24,8 +24,8 @@ namespace phi { template void LayerNormGradKernel(const Context &dev_ctx, const DenseTensor &x, - paddle::optional scale_opt, - paddle::optional bias_opt, + const paddle::optional &scale_opt, + const paddle::optional &bias_opt, const 
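
`prior_dist` in label_smooth is optional: when absent, smoothing falls back to the uniform distribution over the K classes. A sketch of that formula, out = (1 - eps) * label + eps * prior:

    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <vector>

    std::vector<float> LabelSmooth(const std::vector<float>& label,
                                   const std::optional<std::vector<float>>& prior,
                                   float eps) {
      const float uniform = 1.f / label.size();  // default prior: 1/K
      std::vector<float> out(label.size());
      for (std::size_t i = 0; i < label.size(); ++i)
        out[i] = (1.f - eps) * label[i] + eps * (prior ? (*prior)[i] : uniform);
      return out;
    }

    int main() {
      for (float v : LabelSmooth({0.f, 1.f, 0.f, 0.f}, std::nullopt, 0.1f))
        std::cout << v << " ";  // 0.025 0.925 0.025 0.025
    }
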
DenseTensor &mean, const DenseTensor &variance, const DenseTensor &out_grad, diff --git a/paddle/phi/kernels/gpu/layer_norm_kernel.cu b/paddle/phi/kernels/gpu/layer_norm_kernel.cu index d87b7c21938..72127042c16 100644 --- a/paddle/phi/kernels/gpu/layer_norm_kernel.cu +++ b/paddle/phi/kernels/gpu/layer_norm_kernel.cu @@ -55,8 +55,8 @@ template class LayerNormDirectCUDAFunctor; template void LayerNormKernel(const Context &dev_ctx, const DenseTensor &x, - paddle::optional scale_opt, - paddle::optional bias_opt, + const paddle::optional &scale_opt, + const paddle::optional &bias_opt, float epsilon, int begin_norm_axis, bool is_test, diff --git a/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu b/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu index 43106ec1d86..407f33c4008 100644 --- a/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu @@ -23,7 +23,7 @@ template void NllLossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& labels, - paddle::optional weight, + const paddle::optional& weight, const DenseTensor& total_weight, const DenseTensor& dout, int64_t ignore_index, diff --git a/paddle/phi/kernels/gpu/nll_loss_kernel.cu b/paddle/phi/kernels/gpu/nll_loss_kernel.cu index 6b0e1fef7ba..99a8b10b11b 100644 --- a/paddle/phi/kernels/gpu/nll_loss_kernel.cu +++ b/paddle/phi/kernels/gpu/nll_loss_kernel.cu @@ -24,7 +24,7 @@ template void NllLossRawKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& label, - paddle::optional weight, + const paddle::optional& weight, int64_t ignore_index, const std::string& reduction, DenseTensor* out, diff --git a/paddle/phi/kernels/gpu/psroi_pool_grad_kernel.cu b/paddle/phi/kernels/gpu/psroi_pool_grad_kernel.cu index 6745653eba7..45e4730e173 100644 --- a/paddle/phi/kernels/gpu/psroi_pool_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/psroi_pool_grad_kernel.cu @@ -107,7 +107,7 @@ template void PsroiPoolGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, - paddle::optional rois_num, + const paddle::optional& rois_num, const DenseTensor& dout, int pooled_height, int pooled_width, diff --git a/paddle/phi/kernels/gpu/psroi_pool_kernel.cu b/paddle/phi/kernels/gpu/psroi_pool_kernel.cu index 8f9be001ba7..f296d0d2074 100644 --- a/paddle/phi/kernels/gpu/psroi_pool_kernel.cu +++ b/paddle/phi/kernels/gpu/psroi_pool_kernel.cu @@ -107,7 +107,7 @@ template void PsroiPoolKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, - paddle::optional rois_num, + const paddle::optional& rois_num, int pooled_height, int pooled_width, int output_channels, diff --git a/paddle/phi/kernels/gpu/rnn_grad_kernel.cu.cc b/paddle/phi/kernels/gpu/rnn_grad_kernel.cu.cc index 76407475281..98c2f618e78 100644 --- a/paddle/phi/kernels/gpu/rnn_grad_kernel.cu.cc +++ b/paddle/phi/kernels/gpu/rnn_grad_kernel.cu.cc @@ -80,7 +80,7 @@ void RnnGradKernel(const Context &dev_ctx, const DenseTensor &x, const std::vector &pre_state, const std::vector &weight_list, - paddle::optional sequence_length, + const paddle::optional &sequence_length, const DenseTensor &out, const DenseTensor &dropout_state, const DenseTensor &reserve, diff --git a/paddle/phi/kernels/gpu/rnn_kernel.cu.cc b/paddle/phi/kernels/gpu/rnn_kernel.cu.cc index f2ffe3c9d4f..5a19d5b89f0 100644 --- a/paddle/phi/kernels/gpu/rnn_kernel.cu.cc +++ b/paddle/phi/kernels/gpu/rnn_kernel.cu.cc @@ -134,7 +134,7 @@ void RnnKernel(const Context &dev_ctx, const DenseTensor &x, const std::vector &pre_state, const std::vector 
&weight_list, - paddle::optional sequence_length, + const paddle::optional &sequence_length, float dropout_prob, bool is_bidirec, int input_size, diff --git a/paddle/phi/kernels/gpu/roi_align_grad_kernel.cu b/paddle/phi/kernels/gpu/roi_align_grad_kernel.cu index cf076128b69..9f9ea675340 100644 --- a/paddle/phi/kernels/gpu/roi_align_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/roi_align_grad_kernel.cu @@ -172,7 +172,7 @@ template void RoiAlignGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, const DenseTensor& out_grad, int pooled_height, int pooled_width, diff --git a/paddle/phi/kernels/gpu/roi_align_kernel.cu b/paddle/phi/kernels/gpu/roi_align_kernel.cu index cb3375dee95..fc24179ed3d 100644 --- a/paddle/phi/kernels/gpu/roi_align_kernel.cu +++ b/paddle/phi/kernels/gpu/roi_align_kernel.cu @@ -139,7 +139,7 @@ template void RoiAlignKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/gpu/roi_pool_grad_kernel.cu b/paddle/phi/kernels/gpu/roi_pool_grad_kernel.cu index d093a71d23f..1a5af93c562 100644 --- a/paddle/phi/kernels/gpu/roi_pool_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/roi_pool_grad_kernel.cu @@ -75,7 +75,7 @@ template void RoiPoolGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, const DenseTensor& arg_max, const DenseTensor& out_grad, int pooled_height, diff --git a/paddle/phi/kernels/gpu/roi_pool_kernel.cu b/paddle/phi/kernels/gpu/roi_pool_kernel.cu index ab33e2cf647..32ea6223c9c 100644 --- a/paddle/phi/kernels/gpu/roi_pool_kernel.cu +++ b/paddle/phi/kernels/gpu/roi_pool_kernel.cu @@ -104,7 +104,7 @@ template void RoiPoolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/gpu/sgd_kernel.cu b/paddle/phi/kernels/gpu/sgd_kernel.cu index 7dd5a03383f..d71112a2f28 100644 --- a/paddle/phi/kernels/gpu/sgd_kernel.cu +++ b/paddle/phi/kernels/gpu/sgd_kernel.cu @@ -69,7 +69,7 @@ void SGDDenseKernel(const Context& dev_ctx, const DenseTensor& param, const DenseTensor& learning_rate, const DenseTensor& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision, DenseTensor* param_out, DenseTensor* master_param_out) { @@ -106,7 +106,7 @@ void SGDDenseParamSparseGradKernel( const DenseTensor& param, const DenseTensor& learning_rate, const SelectedRows& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision, DenseTensor* param_out, DenseTensor* master_param_out) { @@ -175,7 +175,7 @@ void SGDSparseParamSparseGradKernel( const SelectedRows& param, const DenseTensor& learning_rate, const SelectedRows& grad, - paddle::optional master_param, + const paddle::optional& master_param, bool multi_precision, SelectedRows* param_out, SelectedRows* master_param_out) { diff --git a/paddle/phi/kernels/gpudnn/conv_grad_grad_kernel.cu b/paddle/phi/kernels/gpudnn/conv_grad_grad_kernel.cu index 58c7ea69869..b396e8fa6b0 100644 --- a/paddle/phi/kernels/gpudnn/conv_grad_grad_kernel.cu +++ b/paddle/phi/kernels/gpudnn/conv_grad_grad_kernel.cu @@ -47,8 +47,8 @@ void 
ConvCudnnGradGradKernel( const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const std::vector& strides, const std::vector& paddings_t, const std::string& padding_algorithm, @@ -670,8 +670,8 @@ void ConvCudnnGradGradKernel( template void DepthwiseConvCudnnGradGradKernel( const Context& ctx, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, @@ -711,8 +711,8 @@ void DepthwiseConvCudnnGradGradKernel( template void Conv3DCudnnGradGradKernel( const Context& ctx, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, diff --git a/paddle/phi/kernels/graph_reindex_kernel.h b/paddle/phi/kernels/graph_reindex_kernel.h index 68f1ebc6f5c..12a742006ee 100644 --- a/paddle/phi/kernels/graph_reindex_kernel.h +++ b/paddle/phi/kernels/graph_reindex_kernel.h @@ -23,8 +23,8 @@ void GraphReindexKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& neighbors, const DenseTensor& count, - paddle::optional hashtable_value, - paddle::optional hashtable_index, + const paddle::optional& hashtable_value, + const paddle::optional& hashtable_index, bool flag_buffer_hashtable, DenseTensor* reindex_src, DenseTensor* reindex_dst, diff --git a/paddle/phi/kernels/graph_sample_neighbors_kernel.h b/paddle/phi/kernels/graph_sample_neighbors_kernel.h index f7d205bd08a..065c7f14122 100644 --- a/paddle/phi/kernels/graph_sample_neighbors_kernel.h +++ b/paddle/phi/kernels/graph_sample_neighbors_kernel.h @@ -24,8 +24,8 @@ void GraphSampleNeighborsKernel( const DenseTensor& row, const DenseTensor& col_ptr, const DenseTensor& x, - paddle::optional eids, - paddle::optional perm_buffer, + const paddle::optional& eids, + const paddle::optional& perm_buffer, int sample_size, bool return_eids, bool flag_perm_buffer, diff --git a/paddle/phi/kernels/graph_send_recv_grad_kernel.h b/paddle/phi/kernels/graph_send_recv_grad_kernel.h index c0b1a34d09c..fbb6db358a4 100644 --- a/paddle/phi/kernels/graph_send_recv_grad_kernel.h +++ b/paddle/phi/kernels/graph_send_recv_grad_kernel.h @@ -25,8 +25,8 @@ void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, - paddle::optional out, - paddle::optional dst_count, + const paddle::optional& out, + const paddle::optional& dst_count, const DenseTensor& out_grad, const std::string& pool_type, DenseTensor* x_grad); diff --git a/paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h b/paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h index 7922a767db2..c0da8faadd5 100644 --- a/paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h +++ b/paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h @@ -23,9 +23,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& w, const DenseTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const paddle::optional& path, + const paddle::optional& code, + const paddle::optional& bias, const DenseTensor& pre_out, const DenseTensor& out_grad, int 
num_classes, diff --git a/paddle/phi/kernels/hierarchical_sigmoid_kernel.h b/paddle/phi/kernels/hierarchical_sigmoid_kernel.h index 619b022904b..e32306b645a 100644 --- a/paddle/phi/kernels/hierarchical_sigmoid_kernel.h +++ b/paddle/phi/kernels/hierarchical_sigmoid_kernel.h @@ -23,9 +23,9 @@ void HierarchicalSigmoidKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& w, const DenseTensor& label, - paddle::optional path, - paddle::optional code, - paddle::optional bias, + const paddle::optional& path, + const paddle::optional& code, + const paddle::optional& bias, int num_classes, bool remote_prefetch, int trainer_id, diff --git a/paddle/phi/kernels/impl/activation_grad_impl.h b/paddle/phi/kernels/impl/activation_grad_impl.h index 04391d2538c..80dba29e76c 100644 --- a/paddle/phi/kernels/impl/activation_grad_impl.h +++ b/paddle/phi/kernels/impl/activation_grad_impl.h @@ -265,7 +265,7 @@ void SigmoidTripleGradKernel(const Context& dev_ctx, const DenseTensor& dout, const DenseTensor& ddx, const DenseTensor& d_dout_new, - paddle::optional d_ddout, + const paddle::optional& d_ddout, DenseTensor* d_out_new, DenseTensor* d_dout, DenseTensor* d_ddx) { diff --git a/paddle/phi/kernels/impl/bilinear_tensor_product_kernel_impl.h b/paddle/phi/kernels/impl/bilinear_tensor_product_kernel_impl.h index 3f30a4b958e..4a2e41532e9 100644 --- a/paddle/phi/kernels/impl/bilinear_tensor_product_kernel_impl.h +++ b/paddle/phi/kernels/impl/bilinear_tensor_product_kernel_impl.h @@ -26,7 +26,7 @@ void BilinearTensorProductKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& weight, - paddle::optional bias, + const paddle::optional& bias, DenseTensor* out) { ctx.template Alloc(out); diff --git a/paddle/phi/kernels/impl/conv_grad_grad_kernel_impl.h b/paddle/phi/kernels/impl/conv_grad_grad_kernel_impl.h index 64306bc827e..512b1529f91 100644 --- a/paddle/phi/kernels/impl/conv_grad_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/conv_grad_grad_kernel_impl.h @@ -29,8 +29,8 @@ void ConvGradGradKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, - paddle::optional input_grad_grad, - paddle::optional filter_grad_grad, + const paddle::optional& input_grad_grad, + const paddle::optional& filter_grad_grad, const std::vector& strides_t, const std::vector& paddings_t, const std::string& padding_algorithm, diff --git a/paddle/phi/kernels/impl/deformable_conv_grad_kernel_impl.h b/paddle/phi/kernels/impl/deformable_conv_grad_kernel_impl.h index 8d8e66a02f5..744c48b2bfb 100644 --- a/paddle/phi/kernels/impl/deformable_conv_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/deformable_conv_grad_kernel_impl.h @@ -163,7 +163,7 @@ void DeformableConvGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& offset, const DenseTensor& filter, - paddle::optional mask, + const paddle::optional& mask, const DenseTensor& out_grad, const std::vector& strides, const std::vector& paddings, diff --git a/paddle/phi/kernels/impl/deformable_conv_kernel_impl.h b/paddle/phi/kernels/impl/deformable_conv_kernel_impl.h index 6c0457024dd..f864c2e5f0e 100644 --- a/paddle/phi/kernels/impl/deformable_conv_kernel_impl.h +++ b/paddle/phi/kernels/impl/deformable_conv_kernel_impl.h @@ -28,7 +28,7 @@ void DeformableConvKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& offset, const DenseTensor& filter, - paddle::optional mask, + const paddle::optional& mask, const std::vector& strides, const std::vector& paddings, const 
std::vector& dilations, diff --git a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h index 3c06b238d14..73935640e34 100644 --- a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h @@ -53,8 +53,8 @@ void AddGradImpl(const Context& dev_ctx, template void AddDoubleGradImpl(const Context& dev_ctx, const DenseTensor& y, - const paddle::optional& ddx, - const paddle::optional& ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, const DenseTensor& dout, int axis, DenseTensor* ddout) { @@ -87,8 +87,8 @@ void AddDoubleGradImpl(const Context& dev_ctx, template void SubtractDoubleGradImpl(const Context& dev_ctx, const DenseTensor& y, - const paddle::optional& ddx, - const paddle::optional& ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, const DenseTensor& dout, int axis, DenseTensor* ddout) { @@ -160,8 +160,8 @@ void DivideDoubleGradKernel(const Context& dev_ctx, const DenseTensor& y, const DenseTensor& out, const DenseTensor& dx, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* dy, DenseTensor* dout, @@ -416,8 +416,8 @@ void MultiplyDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, int axis, DenseTensor* dx, DenseTensor* dy, @@ -535,11 +535,11 @@ void MultiplyTripleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, const DenseTensor& d_dx, const DenseTensor& d_dy, - paddle::optional d_ddout, + const paddle::optional& d_ddout, int axis, DenseTensor* d_x, DenseTensor* d_y, diff --git a/paddle/phi/kernels/impl/expand_as_kernel_impl.h b/paddle/phi/kernels/impl/expand_as_kernel_impl.h index e5138e4e12c..a5661aaa2ac 100644 --- a/paddle/phi/kernels/impl/expand_as_kernel_impl.h +++ b/paddle/phi/kernels/impl/expand_as_kernel_impl.h @@ -93,7 +93,7 @@ void ExpandAs(const Context& context, template void ExpandAsKernel(const Context& ctx, const DenseTensor& x, - paddle::optional y, + const paddle::optional& y, const std::vector& target_shape, DenseTensor* out) { auto rank = x.dims().size(); diff --git a/paddle/phi/kernels/impl/matmul_grad_kernel_impl.h b/paddle/phi/kernels/impl/matmul_grad_kernel_impl.h index 25a9db868d3..5641e7a8274 100644 --- a/paddle/phi/kernels/impl/matmul_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/matmul_grad_kernel_impl.h @@ -473,8 +473,8 @@ void MatmulDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, bool transpose_x, bool transpose_y, DenseTensor* dx, @@ -854,9 +854,9 @@ void MatmulTripleGradKernel(const Context& dev_ctx, const DenseTensor& dout, const DenseTensor& ddx, const DenseTensor& ddy, - paddle::optional d_dx, - paddle::optional d_dy, - paddle::optional d_ddout, + const paddle::optional& d_dx, + const paddle::optional& d_dy, + const paddle::optional& d_ddout, bool transpose_x, bool transpose_y, DenseTensor* out_d_x, @@ -1790,8 +1790,8 @@ void MatmulWithFlattenDoubleGradKernel( const DenseTensor& x, const DenseTensor& y, const DenseTensor& 
out_grad, - paddle::optional x_grad_grad, - paddle::optional y_grad_grad, + const paddle::optional& x_grad_grad, + const paddle::optional& y_grad_grad, int x_num_col_dims, int y_num_col_dims, DenseTensor* x_grad, diff --git a/paddle/phi/kernels/impl/momentum_kernel_impl.h b/paddle/phi/kernels/impl/momentum_kernel_impl.h index 3aca225ad40..825a3b9d569 100644 --- a/paddle/phi/kernels/impl/momentum_kernel_impl.h +++ b/paddle/phi/kernels/impl/momentum_kernel_impl.h @@ -408,7 +408,7 @@ void MomentumDenseImpl(const Context& ctx, const DenseTensor& grad, const DenseTensor& velocity, const DenseTensor& learning_rate, - paddle::optional master_param_opt, + const paddle::optional& master_param_opt, float mu_t, bool use_nesterov, const std::string& regularization_method, @@ -500,7 +500,7 @@ void MomentumSparseImpl(const Context& ctx, const SelectedRows& grad, const DenseTensor& velocity, const DenseTensor& learning_rate, - paddle::optional master_param_opt, + const paddle::optional& master_param_opt, float mu_t, bool use_nesterov, const std::string& regularization_method, @@ -602,7 +602,7 @@ void MomentumDenseKernel(const Context& dev_ctx, const DenseTensor& grad, const DenseTensor& velocity, const DenseTensor& learning_rate, - paddle::optional master_param, + const paddle::optional& master_param, float mu, bool use_nesterov, const std::string& regularization_method, @@ -654,7 +654,7 @@ void MomentumSparseKernel(const Context& dev_ctx, const SelectedRows& grad, const DenseTensor& velocity, const DenseTensor& learning_rate, - paddle::optional master_param, + const paddle::optional& master_param, float mu, bool use_nesterov, const std::string& regularization_method, diff --git a/paddle/phi/kernels/impl/rmsprop_kernel_impl.h b/paddle/phi/kernels/impl/rmsprop_kernel_impl.h index 64b12837074..1954c5f20db 100644 --- a/paddle/phi/kernels/impl/rmsprop_kernel_impl.h +++ b/paddle/phi/kernels/impl/rmsprop_kernel_impl.h @@ -146,7 +146,7 @@ void RmspropDenseKernel(const Context &ctx, const DenseTensor &grad, const DenseTensor &moment, const DenseTensor &learning_rate, - paddle::optional mean_grad_opt, + const paddle::optional &mean_grad_opt, float epsilon_t, float decay_t, float momentum_t, @@ -196,11 +196,19 @@ void RmspropDenseKernel(const Context &ctx, if (centered) { auto mg_tensor = mean_grad_opt.get_ptr(); auto mg = EigenVector::Flatten(*mg_tensor); - PADDLE_ENFORCE_EQ( - mg_tensor, - mean_grad_out, - phi::errors::InvalidArgument( - "MeanGrad and MeanGradOut must be the same Tensor")); + if (mg_tensor) { + PADDLE_ENFORCE_EQ( + mg_tensor->Holder(), + mean_grad_out->Holder(), + phi::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); + } else { + PADDLE_ENFORCE_EQ( + mg_tensor, + mean_grad_out, + phi::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); + } auto mg_out = EigenVector::Flatten(*mean_grad_out); mg_out.device(place) = rho * mg + (1 - rho) * g; @@ -217,12 +225,20 @@ void RmspropDenseKernel(const Context &ctx, funcs::ForRange for_range(ctx, limit); if (centered) { auto mg_tensor = mean_grad_opt.get_ptr(); + if (mg_tensor) { + PADDLE_ENFORCE_EQ( + mg_tensor->Holder(), + mean_grad_out->Holder(), + phi::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); + } else { + PADDLE_ENFORCE_EQ( + mg_tensor, + mean_grad_out, + phi::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); + } - PADDLE_ENFORCE_EQ( - mg_tensor, - mean_grad_out, - phi::errors::InvalidArgument( - "MeanGrad and MeanGradOut 
must be the same Tensor")); for_range(CenteredRmspropFunctor>( ctx.template Alloc(param_out), ctx.template Alloc(mean_square_out), @@ -254,7 +270,7 @@ void RmspropSparseKernel(const Context &ctx, const SelectedRows &grad, const DenseTensor &moment, const DenseTensor &learning_rate, - paddle::optional mean_grad_opt, + const paddle::optional &mean_grad_opt, float epsilon_t, float decay_t, float momentum_t, @@ -305,11 +321,20 @@ void RmspropSparseKernel(const Context &ctx, if (centered) { auto mg_tensor = mean_grad_opt.get_ptr(); + if (mg_tensor) { + PADDLE_ENFORCE_EQ( + mg_tensor->Holder(), + mean_grad_out->Holder(), + phi::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); + } else { + PADDLE_ENFORCE_EQ( + mg_tensor, + mean_grad_out, + phi::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); + } - PADDLE_ENFORCE_EQ(mg_tensor, - mean_grad_out, - phi::errors::InvalidArgument( - "MeanGrad and MeanGradOut must be the same Tensor")); for_range(CenteredRmspropFunctor>( ctx.template Alloc(param_out), ctx.template Alloc(mean_square_out), diff --git a/paddle/phi/kernels/impl/segment_pool_grad_kernel_impl.h b/paddle/phi/kernels/impl/segment_pool_grad_kernel_impl.h index 4ba1a0c6b6c..bd0ba26b99a 100644 --- a/paddle/phi/kernels/impl/segment_pool_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/segment_pool_grad_kernel_impl.h @@ -27,7 +27,7 @@ void SegmentPoolGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& segment_ids, const DenseTensor& out, - paddle::optional summed_ids, + const paddle::optional& summed_ids, const DenseTensor& out_grad, const std::string& pooltype, DenseTensor* x_grad) { diff --git a/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h b/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h index b788c966a1a..b07628c9814 100644 --- a/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h @@ -32,7 +32,7 @@ void WarpctcGradKernel(const Context& dev_ctx, const DenseTensor& warpctc_grad, const DenseTensor& logits, const DenseTensor& loss_grad, - const paddle::optional logits_length, + const paddle::optional& logits_length, int blank, bool norm_by_times, DenseTensor* logits_grad) { diff --git a/paddle/phi/kernels/impl/warpctc_kernel_impl.h b/paddle/phi/kernels/impl/warpctc_kernel_impl.h index ef6be7a9dfa..6c792507c6f 100644 --- a/paddle/phi/kernels/impl/warpctc_kernel_impl.h +++ b/paddle/phi/kernels/impl/warpctc_kernel_impl.h @@ -229,8 +229,8 @@ template void WarpctcKernel(const Context& dev_ctx, const DenseTensor& logits, const DenseTensor& label, - const paddle::optional logits_length, - const paddle::optional labels_length, + const paddle::optional& logits_length, + const paddle::optional& labels_length, int blank, bool norm_by_times, DenseTensor* warpctc_grad, diff --git a/paddle/phi/kernels/instance_norm_grad_kernel.h b/paddle/phi/kernels/instance_norm_grad_kernel.h index 7924c767ab6..be7e4ce3e34 100644 --- a/paddle/phi/kernels/instance_norm_grad_kernel.h +++ b/paddle/phi/kernels/instance_norm_grad_kernel.h @@ -22,7 +22,7 @@ template void InstanceNormGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y_grad, - paddle::optional scale, + const paddle::optional& scale, const DenseTensor& saved_mean, const DenseTensor& saved_variance, float epsilon, @@ -33,13 +33,13 @@ void InstanceNormGradKernel(const Context& dev_ctx, template void InstanceNormDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional 
scale, + const paddle::optional& scale, const DenseTensor& saved_mean, const DenseTensor& saved_variance, const DenseTensor& dy, - paddle::optional ddx, - paddle::optional ddscale, - paddle::optional ddbias, + const paddle::optional& ddx, + const paddle::optional& ddscale, + const paddle::optional& ddbias, float epsilon, DenseTensor* dx, DenseTensor* dscale, diff --git a/paddle/phi/kernels/instance_norm_kernel.h b/paddle/phi/kernels/instance_norm_kernel.h index 8c50025a73c..f8f1bbe1287 100644 --- a/paddle/phi/kernels/instance_norm_kernel.h +++ b/paddle/phi/kernels/instance_norm_kernel.h @@ -21,8 +21,8 @@ namespace phi { template void InstanceNormKernel(const Context& dev_ctx, const DenseTensor& x, - paddle::optional scale, - paddle::optional bias, + const paddle::optional& scale, + const paddle::optional& bias, float epsilon, DenseTensor* y, DenseTensor* saved_mean, diff --git a/paddle/phi/kernels/interpolate_grad_kernel.h b/paddle/phi/kernels/interpolate_grad_kernel.h index 59d2dddd870..b8eefad61a7 100644 --- a/paddle/phi/kernels/interpolate_grad_kernel.h +++ b/paddle/phi/kernels/interpolate_grad_kernel.h @@ -22,9 +22,9 @@ template void BilinearInterpGradKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const DenseTensor& out_grad, const std::string& data_layout, int out_d, diff --git a/paddle/phi/kernels/interpolate_kernel.h b/paddle/phi/kernels/interpolate_kernel.h index 4623657f5a5..c531461c12e 100644 --- a/paddle/phi/kernels/interpolate_kernel.h +++ b/paddle/phi/kernels/interpolate_kernel.h @@ -22,9 +22,9 @@ template void BilinearInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -39,10 +39,9 @@ template void NearestInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -57,10 +56,9 @@ template void TrilinearInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -75,10 +73,9 @@ template void LinearInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, @@ -93,10 +90,9 @@ template void BicubicInterpKernel( const Context& ctx, const DenseTensor& x, - paddle::optional out_size, - paddle::optional> size_tensor, - - paddle::optional scale_tensor, + const paddle::optional& out_size, + const paddle::optional>& size_tensor, + const paddle::optional& scale_tensor, const std::string& data_layout, int out_d, int out_h, diff 
--git a/paddle/phi/kernels/label_smooth_kernel.h b/paddle/phi/kernels/label_smooth_kernel.h index b7e1f270889..2db35e1bff3 100644 --- a/paddle/phi/kernels/label_smooth_kernel.h +++ b/paddle/phi/kernels/label_smooth_kernel.h @@ -23,7 +23,7 @@ namespace phi { template void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, - paddle::optional prior_dist, + const paddle::optional& prior_dist, float epsilon, DenseTensor* out); diff --git a/paddle/phi/kernels/layer_norm_grad_kernel.h b/paddle/phi/kernels/layer_norm_grad_kernel.h index 65f19a11b94..7d7cd13109b 100644 --- a/paddle/phi/kernels/layer_norm_grad_kernel.h +++ b/paddle/phi/kernels/layer_norm_grad_kernel.h @@ -21,8 +21,8 @@ namespace phi { template void LayerNormGradKernel(const Context& ctx, const DenseTensor& x, - paddle::optional scale, - paddle::optional bias, + const paddle::optional& scale, + const paddle::optional& bias, const DenseTensor& mean, const DenseTensor& variance, const DenseTensor& out_grad, diff --git a/paddle/phi/kernels/layer_norm_kernel.h b/paddle/phi/kernels/layer_norm_kernel.h index c9679420bda..26c04b61af9 100644 --- a/paddle/phi/kernels/layer_norm_kernel.h +++ b/paddle/phi/kernels/layer_norm_kernel.h @@ -22,8 +22,8 @@ namespace phi { template void LayerNormKernel(const Context& ctx, const DenseTensor& x, - paddle::optional scale, - paddle::optional bias, + const paddle::optional& scale, + const paddle::optional& bias, float epsilon, int begin_norm_axis, bool is_test, diff --git a/paddle/phi/kernels/matmul_grad_kernel.h b/paddle/phi/kernels/matmul_grad_kernel.h index 41a835db46f..47c6acdcb39 100644 --- a/paddle/phi/kernels/matmul_grad_kernel.h +++ b/paddle/phi/kernels/matmul_grad_kernel.h @@ -34,8 +34,8 @@ void MatmulDoubleGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& dout, - paddle::optional ddx, - paddle::optional ddy, + const paddle::optional& ddx, + const paddle::optional& ddy, bool transpose_x, bool transpose_y, DenseTensor* dx, @@ -49,9 +49,9 @@ void MatmulTripleGradKernel(const Context& dev_ctx, const DenseTensor& dout, const DenseTensor& ddx, const DenseTensor& ddy, - paddle::optional d_dx, - paddle::optional d_dy, - paddle::optional d_ddout, + const paddle::optional& d_dx, + const paddle::optional& d_dy, + const paddle::optional& d_ddout, bool transpose_x, bool transpose_y, DenseTensor* out_d_x, @@ -76,8 +76,8 @@ void MatmulWithFlattenDoubleGradKernel( const DenseTensor& x, const DenseTensor& y, const DenseTensor& out_grad, - paddle::optional x_grad_grad, - paddle::optional y_grad_grad, + const paddle::optional& x_grad_grad, + const paddle::optional& y_grad_grad, int x_num_col_dims, int y_num_col_dims, DenseTensor* x_grad, diff --git a/paddle/phi/kernels/momentum_kernel.h b/paddle/phi/kernels/momentum_kernel.h index b4ba449aaf3..172b345af16 100644 --- a/paddle/phi/kernels/momentum_kernel.h +++ b/paddle/phi/kernels/momentum_kernel.h @@ -25,7 +25,7 @@ void MomentumDenseKernel(const Context& dev_ctx, const DenseTensor& grad, const DenseTensor& velocity, const DenseTensor& learning_rate, - paddle::optional master_param, + const paddle::optional& master_param, float mu, bool use_nesterov, const std::string& regularization_method, @@ -42,7 +42,7 @@ void MomentumSparseKernel(const Context& dev_ctx, const SelectedRows& grad, const DenseTensor& velocity, const DenseTensor& learning_rate, - paddle::optional master_param, + const paddle::optional& master_param, float mu, bool use_nesterov, const std::string& regularization_method, diff --git 
a/paddle/phi/kernels/nll_loss_grad_kernel.h b/paddle/phi/kernels/nll_loss_grad_kernel.h index c06f0726899..b682edc24df 100644 --- a/paddle/phi/kernels/nll_loss_grad_kernel.h +++ b/paddle/phi/kernels/nll_loss_grad_kernel.h @@ -22,7 +22,7 @@ template void NllLossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& label, - paddle::optional weight, + const paddle::optional& weight, const DenseTensor& total_weight, const DenseTensor& d_out, int64_t ignore_index, diff --git a/paddle/phi/kernels/nll_loss_kernel.cc b/paddle/phi/kernels/nll_loss_kernel.cc index b271f0f4d06..cf6d4d01410 100644 --- a/paddle/phi/kernels/nll_loss_kernel.cc +++ b/paddle/phi/kernels/nll_loss_kernel.cc @@ -19,7 +19,7 @@ template void NllLossKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& label, - paddle::optional weight, + const paddle::optional& weight, int64_t ignore_index, const std::string& reduction, DenseTensor* out) { diff --git a/paddle/phi/kernels/nll_loss_kernel.h b/paddle/phi/kernels/nll_loss_kernel.h index 90083e1d684..cffaa314860 100644 --- a/paddle/phi/kernels/nll_loss_kernel.h +++ b/paddle/phi/kernels/nll_loss_kernel.h @@ -24,7 +24,7 @@ template void NllLossRawKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& label, - paddle::optional weight, + const paddle::optional& weight, int64_t ignore_index, const std::string& reduction, DenseTensor* out, diff --git a/paddle/phi/kernels/psroi_pool_grad_kernel.h b/paddle/phi/kernels/psroi_pool_grad_kernel.h index 87163eb8e07..8dcf81194e2 100644 --- a/paddle/phi/kernels/psroi_pool_grad_kernel.h +++ b/paddle/phi/kernels/psroi_pool_grad_kernel.h @@ -23,7 +23,7 @@ template void PsroiPoolGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, - paddle::optional rois_num, + const paddle::optional& rois_num, const DenseTensor& dout, int pooled_height, int pooled_width, diff --git a/paddle/phi/kernels/psroi_pool_kernel.h b/paddle/phi/kernels/psroi_pool_kernel.h index 341037af2ca..5838fa89511 100644 --- a/paddle/phi/kernels/psroi_pool_kernel.h +++ b/paddle/phi/kernels/psroi_pool_kernel.h @@ -23,7 +23,7 @@ template void PsroiPoolKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, - paddle::optional rois_num, + const paddle::optional& rois_num, int pooled_height, int pooled_width, int output_channels, diff --git a/paddle/phi/kernels/rmsprop_kernel.h b/paddle/phi/kernels/rmsprop_kernel.h index 4c3c9aa8221..fba2095cc8b 100644 --- a/paddle/phi/kernels/rmsprop_kernel.h +++ b/paddle/phi/kernels/rmsprop_kernel.h @@ -26,7 +26,7 @@ void RmspropDenseKernel(const Context& dev_ctx, const DenseTensor& grad, const DenseTensor& moment, const DenseTensor& learning_rate, - paddle::optional mean_grad, + const paddle::optional& mean_grad, float epsilon, float decay, float momentum, @@ -43,7 +43,7 @@ void RmspropSparseKernel(const Context& dev_ctx, const SelectedRows& grad, const DenseTensor& moment, const DenseTensor& learning_rate, - paddle::optional mean_grad, + const paddle::optional& mean_grad, float epsilon, float decay, float momentum, diff --git a/paddle/phi/kernels/rnn_grad_kernel.h b/paddle/phi/kernels/rnn_grad_kernel.h index e5b1100cf72..024ed287bb1 100644 --- a/paddle/phi/kernels/rnn_grad_kernel.h +++ b/paddle/phi/kernels/rnn_grad_kernel.h @@ -24,7 +24,7 @@ void RnnGradKernel(const Context& dev_ctx, const DenseTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const paddle::optional& sequence_length, const 
DenseTensor& out, const DenseTensor& dropout_state, const DenseTensor& reserve, diff --git a/paddle/phi/kernels/rnn_kernel.h b/paddle/phi/kernels/rnn_kernel.h index f1534aa5988..61dfb6f56d7 100644 --- a/paddle/phi/kernels/rnn_kernel.h +++ b/paddle/phi/kernels/rnn_kernel.h @@ -24,7 +24,7 @@ void RnnKernel(const Context& dev_ctx, const DenseTensor& x, const std::vector& pre_state, const std::vector& weight_list, - paddle::optional sequence_length, + const paddle::optional& sequence_length, float dropout_prob, bool is_bidirec, int input_size, diff --git a/paddle/phi/kernels/roi_align_grad_kernel.h b/paddle/phi/kernels/roi_align_grad_kernel.h index eea1fa03886..a7c2ed3beb5 100644 --- a/paddle/phi/kernels/roi_align_grad_kernel.h +++ b/paddle/phi/kernels/roi_align_grad_kernel.h @@ -23,7 +23,7 @@ template void RoiAlignGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, const DenseTensor& out_grad, int pooled_height, int pooled_width, diff --git a/paddle/phi/kernels/roi_align_kernel.h b/paddle/phi/kernels/roi_align_kernel.h index 9734da53b7f..fa3161e3238 100644 --- a/paddle/phi/kernels/roi_align_kernel.h +++ b/paddle/phi/kernels/roi_align_kernel.h @@ -23,7 +23,7 @@ template void RoiAlignKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/roi_pool_grad_kernel.h b/paddle/phi/kernels/roi_pool_grad_kernel.h index d7f1c378f75..f18bd1d65e6 100644 --- a/paddle/phi/kernels/roi_pool_grad_kernel.h +++ b/paddle/phi/kernels/roi_pool_grad_kernel.h @@ -23,7 +23,7 @@ template void RoiPooGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, const DenseTensor& arg_max, const DenseTensor& out_grad, int pooled_height, diff --git a/paddle/phi/kernels/roi_pool_kernel.h b/paddle/phi/kernels/roi_pool_kernel.h index c6ff6f22361..e7ed2587968 100644 --- a/paddle/phi/kernels/roi_pool_kernel.h +++ b/paddle/phi/kernels/roi_pool_kernel.h @@ -25,7 +25,7 @@ template void RoiPoolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& boxes, - paddle::optional boxes_num, + const paddle::optional& boxes_num, int pooled_height, int pooled_width, float spatial_scale, diff --git a/paddle/phi/kernels/segment_pool_grad_kernel.h b/paddle/phi/kernels/segment_pool_grad_kernel.h index e773eed16e8..edf9ff9c756 100644 --- a/paddle/phi/kernels/segment_pool_grad_kernel.h +++ b/paddle/phi/kernels/segment_pool_grad_kernel.h @@ -23,7 +23,7 @@ void SegmentPoolGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& segment_ids, const DenseTensor& out, - paddle::optional summed_ids, + const paddle::optional& summed_ids, const DenseTensor& out_grad, const std::string& pooltype, DenseTensor* x_grad); diff --git a/paddle/phi/kernels/selected_rows/adam_kernel.h b/paddle/phi/kernels/selected_rows/adam_kernel.h index 2e13d29d172..79f87a8ed75 100644 --- a/paddle/phi/kernels/selected_rows/adam_kernel.h +++ b/paddle/phi/kernels/selected_rows/adam_kernel.h @@ -31,8 +31,8 @@ void AdamDenseParamSparseGradKernel( const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& 
beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/selected_rows/adamw_kernel.h b/paddle/phi/kernels/selected_rows/adamw_kernel.h index ddb155ce450..5dda8107d52 100644 --- a/paddle/phi/kernels/selected_rows/adamw_kernel.h +++ b/paddle/phi/kernels/selected_rows/adamw_kernel.h @@ -31,8 +31,8 @@ void AdamwDenseParamSparseGradKernel( const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/selected_rows/assign_kernel.cc b/paddle/phi/kernels/selected_rows/assign_kernel.cc index fae876facfc..f0c0ffb591a 100644 --- a/paddle/phi/kernels/selected_rows/assign_kernel.cc +++ b/paddle/phi/kernels/selected_rows/assign_kernel.cc @@ -20,7 +20,7 @@ namespace phi { namespace sr { -// Note: use `const paddle::optional x` +// Note: use `const paddle::optional& x` // as input if needed template void AssignKernel(const Context& dev_ctx, diff --git a/paddle/phi/kernels/selected_rows/cpu/adam_kernel.cc b/paddle/phi/kernels/selected_rows/cpu/adam_kernel.cc index 57e33beb95e..d96c707538e 100644 --- a/paddle/phi/kernels/selected_rows/cpu/adam_kernel.cc +++ b/paddle/phi/kernels/selected_rows/cpu/adam_kernel.cc @@ -35,8 +35,8 @@ void AdamDenseParamSparseGradKernel( const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/selected_rows/cpu/adamw_kernel.cc b/paddle/phi/kernels/selected_rows/cpu/adamw_kernel.cc index a52bca76110..6d2fc164d6b 100644 --- a/paddle/phi/kernels/selected_rows/cpu/adamw_kernel.cc +++ b/paddle/phi/kernels/selected_rows/cpu/adamw_kernel.cc @@ -35,8 +35,8 @@ void AdamwDenseParamSparseGradKernel( const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/selected_rows/gpu/adam_kernel.cu b/paddle/phi/kernels/selected_rows/gpu/adam_kernel.cu index 31abac14995..18b6da818a1 100644 --- a/paddle/phi/kernels/selected_rows/gpu/adam_kernel.cu +++ b/paddle/phi/kernels/selected_rows/gpu/adam_kernel.cu @@ -102,8 +102,8 @@ void AdamDenseParamSparseGradKernel( const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const paddle::optional& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, diff --git a/paddle/phi/kernels/selected_rows/gpu/adamw_kernel.cu b/paddle/phi/kernels/selected_rows/gpu/adamw_kernel.cu index b847f48d122..182c4390b17 100644 --- a/paddle/phi/kernels/selected_rows/gpu/adamw_kernel.cu +++ b/paddle/phi/kernels/selected_rows/gpu/adamw_kernel.cu @@ -112,8 +112,8 @@ void AdamwDenseParamSparseGradKernel( const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, - paddle::optional master_param, - paddle::optional skip_update, + const paddle::optional& master_param, + const 
diff --git a/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.cc b/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.cc
index 1660601bbd3..616786d210d 100644
--- a/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.cc
+++ b/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.cc
@@ -40,9 +40,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& w,
                                    const DenseTensor& label,
-                                   paddle::optional<const DenseTensor&> path,
-                                   paddle::optional<const DenseTensor&> code,
-                                   paddle::optional<const DenseTensor&> bias,
+                                   const paddle::optional<DenseTensor>& path,
+                                   const paddle::optional<DenseTensor>& code,
+                                   const paddle::optional<DenseTensor>& bias,
                                    const DenseTensor& pre_out,
                                    const DenseTensor& out_grad,
                                    int num_classes,
diff --git a/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h b/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h
index 4c03b83d80f..aca355f515c 100644
--- a/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h
+++ b/paddle/phi/kernels/selected_rows/hierarchical_sigmoid_grad_kernel.h
@@ -25,9 +25,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& w,
                                    const DenseTensor& label,
-                                   paddle::optional<const DenseTensor&> path,
-                                   paddle::optional<const DenseTensor&> code,
-                                   paddle::optional<const DenseTensor&> bias,
+                                   const paddle::optional<DenseTensor>& path,
+                                   const paddle::optional<DenseTensor>& code,
+                                   const paddle::optional<DenseTensor>& bias,
                                    const DenseTensor& pre_out,
                                    const DenseTensor& out_grad,
                                    int num_classes,
diff --git a/paddle/phi/kernels/sgd_kernel.h b/paddle/phi/kernels/sgd_kernel.h
index 12361c738e2..226a719b902 100644
--- a/paddle/phi/kernels/sgd_kernel.h
+++ b/paddle/phi/kernels/sgd_kernel.h
@@ -24,7 +24,7 @@ void SGDDenseKernel(const Context& dev_ctx,
                     const DenseTensor& param,
                     const DenseTensor& learning_rate,
                     const DenseTensor& grad,
-                    paddle::optional<const DenseTensor&> master_param,
+                    const paddle::optional<DenseTensor>& master_param,
                     bool multi_precision,
                     DenseTensor* param_out,
                     DenseTensor* master_param_out);
@@ -35,7 +35,7 @@ void SGDDenseParamSparseGradKernel(
     const DenseTensor& param,
     const DenseTensor& learning_rate,
     const SelectedRows& grad,
-    paddle::optional<const DenseTensor&> master_param,
+    const paddle::optional<DenseTensor>& master_param,
     bool multi_precision,
     DenseTensor* param_out,
     DenseTensor* master_param_out);
@@ -46,7 +46,7 @@ void SGDSparseParamSparseGradKernel(
     const SelectedRows& param,
     const DenseTensor& learning_rate,
     const SelectedRows& grad,
-    paddle::optional<const DenseTensor&> master_param,
+    const paddle::optional<DenseTensor>& master_param,
     bool multi_precision,
     SelectedRows* param_out,
     SelectedRows* master_param_out);
diff --git a/paddle/phi/kernels/warpctc_grad_kernel.h b/paddle/phi/kernels/warpctc_grad_kernel.h
index 8e1ab43324a..8a8251aabe4 100644
--- a/paddle/phi/kernels/warpctc_grad_kernel.h
+++ b/paddle/phi/kernels/warpctc_grad_kernel.h
@@ -24,7 +24,7 @@ void WarpctcGradKernel(const Context& dev_ctx,
                        const DenseTensor& warpctc_grad,
                        const DenseTensor& logits,
                        const DenseTensor& loss_grad,
-                       paddle::optional<const DenseTensor&> logits_length,
+                       const paddle::optional<DenseTensor>& logits_length,
                        int blank,
                        bool norm_by_times,
                        DenseTensor* logits_grad);
diff --git a/paddle/phi/kernels/warpctc_kernel.h b/paddle/phi/kernels/warpctc_kernel.h
index 4baa4906477..0b9e9eb87f6 100644
--- a/paddle/phi/kernels/warpctc_kernel.h
+++ b/paddle/phi/kernels/warpctc_kernel.h
@@ -23,8 +23,8 @@ template <typename T, typename Context>
 void WarpctcKernel(const Context& dev_ctx,
                    const DenseTensor& logits,
                    const DenseTensor& label,
-                   paddle::optional<const DenseTensor&> logits_length,
-                   paddle::optional<const DenseTensor&> labels_length,
+                   const paddle::optional<DenseTensor>& logits_length,
+                   const paddle::optional<DenseTensor>& labels_length,
                    int blank,
                    bool norm_by_times,
                    DenseTensor* warpctc_grad,
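On the caller's side, the reference form binds equally well to a populated optional or to `paddle::none`, so call sites need no `make_optional` wrapping. A hypothetical direct call to the `WarpctcKernel` declared above (the device context, input tensors, and output tensors are assumed to be constructed elsewhere):

// Hypothetical call site, for illustration only.
paddle::optional<phi::DenseTensor> logits_length;  // empty: not provided
phi::WarpctcKernel<float, phi::CPUContext>(dev_ctx,
                                           logits,
                                           label,
                                           logits_length,  // empty optional
                                           paddle::none,   // labels_length
                                           /*blank=*/0,
                                           /*norm_by_times=*/false,
                                           &warpctc_grad,
                                           &loss);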
diff --git a/paddle/phi/kernels/yolov3_loss_grad_kernel.h b/paddle/phi/kernels/yolov3_loss_grad_kernel.h
index 789e782443f..b4ce5b95398 100644
--- a/paddle/phi/kernels/yolov3_loss_grad_kernel.h
+++ b/paddle/phi/kernels/yolov3_loss_grad_kernel.h
@@ -23,7 +23,7 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& gt_box,
                           const DenseTensor& gt_label,
-                          paddle::optional<const DenseTensor&> gt_score,
+                          const paddle::optional<DenseTensor>& gt_score,
                           const DenseTensor& loss_grad,
                           const DenseTensor& objectness_mask,
                           const DenseTensor& gt_match_mask,
diff --git a/paddle/phi/kernels/yolov3_loss_kernel.h b/paddle/phi/kernels/yolov3_loss_kernel.h
index eb6668000de..3dabe5ce820 100644
--- a/paddle/phi/kernels/yolov3_loss_kernel.h
+++ b/paddle/phi/kernels/yolov3_loss_kernel.h
@@ -23,7 +23,7 @@ void Yolov3LossKernel(const Context& dev_ctx,
                       const DenseTensor& x,
                       const DenseTensor& gt_box,
                       const DenseTensor& gt_label,
-                      paddle::optional<const DenseTensor&> gt_score,
+                      const paddle::optional<DenseTensor>& gt_score,
                       const std::vector<int>& anchors,
                       const std::vector<int>& anchor_mask,
                       int class_num,
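The api_base.py hunks below delete the hand-rolled optional-MetaTensor boilerplate from the generated API code and emit a single `MakeMetaTensor(...)` call instead. That relies on an overload in paddle/phi/api/lib/api_gen_utils.cc (also touched by this patch, per the file list) that folds the absent case into a default-constructed MetaTensor; presumably along these lines, sketched from memory rather than quoted:

// Sketch of the assumed overload: an empty optional becomes a default
// MetaTensor, which InferMeta functions can then test via initialized().
phi::MetaTensor MakeMetaTensor(
    const paddle::optional<phi::DenseTensor>& tensor) {
  if (tensor) {
    return phi::MetaTensor(*tensor);  // wrap the contained DenseTensor
  }
  return phi::MetaTensor();           // absent input
}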
diff --git a/python/paddle/utils/code_gen/api_base.py b/python/paddle/utils/code_gen/api_base.py
index 146925ccef6..1638f6afab2 100644
--- a/python/paddle/utils/code_gen/api_base.py
+++ b/python/paddle/utils/code_gen/api_base.py
@@ -141,7 +141,7 @@ class BaseAPI(object):
             'int[]': 'const std::vector<int>&'
         }
         optional_types_trans = {
-            'Tensor': 'paddle::optional<const Tensor&>',
+            'Tensor': 'const paddle::optional<Tensor>&',
             'Tensor[]': 'const paddle::optional<std::vector<Tensor>>&',
             'int': 'paddle::optional<int>',
             'int32_t': 'paddle::optional<int32_t>',
@@ -512,18 +512,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                     param_code = param_code + param + "_metas, "
                 elif param in self.optional_vars:
-                    meta_tensor_code = meta_tensor_code + f"""
-{code_indent}  paddle::optional<const phi::MetaTensor&> {PREFIX_TENSOR_NAME}meta_ref_{param} = paddle::none;
-{code_indent}  phi::DenseTensor {param}_dt;
-{code_indent}  phi::MetaTensor {PREFIX_TENSOR_NAME}meta_tmp_{param}({param}_dt);
-{code_indent}  if ({PREFIX_TENSOR_NAME}{param}_ptr) {{
-{code_indent}    {PREFIX_TENSOR_NAME}meta_tmp_{param}.set_dtype( {PREFIX_TENSOR_NAME}{param}_ptr->dtype() );
-{code_indent}    {PREFIX_TENSOR_NAME}meta_tmp_{param}.set_dims( {PREFIX_TENSOR_NAME}{param}_ptr->dims() );
-{code_indent}    {PREFIX_TENSOR_NAME}meta_tmp_{param}.set_layout( {PREFIX_TENSOR_NAME}{param}_ptr->layout() );
-{code_indent}    {PREFIX_TENSOR_NAME}meta_ref_{param} = {PREFIX_TENSOR_NAME}meta_tmp_{param};
-{code_indent}  }}\n"""
-
-                    param_code = param_code + f"{PREFIX_TENSOR_NAME}meta_ref_{param}, "
+                    param_code = param_code + "MakeMetaTensor(" + PREFIX_TENSOR_NAME + param + "), "
                 else:
                     raise ValueError(
                         f"{self.api} : Param of infer_meta error : {self.inputs['input_info'][param]} type is not supported."
@@ -568,8 +557,8 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
             'const std::vector<const phi::DenseTensor*>&',
             'const paddle::optional<Tensor&>':
             'paddle::optional<const phi::DenseTensor&>',
-            'paddle::optional<const Tensor&>':
-            'paddle::optional<const phi::DenseTensor&>',
+            'const paddle::optional<Tensor>&':
+            'const paddle::optional<phi::DenseTensor>&',
             'const paddle::optional<std::vector<Tensor>>&':
             'paddle::optional<const std::vector<phi::DenseTensor>&>'
         }
@@ -597,11 +586,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                 trans_flag = "{false, true}"
             if input_name in self.optional_vars:
                 input_tensor_code = input_tensor_code + f"""
-{code_indent}  {input_trans_map[input_infos[input_name]]} {PREFIX_TENSOR_NAME}{input_name}(paddle::none);
-{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_ptr = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
-{code_indent}  if ({PREFIX_TENSOR_NAME}{input_name}_ptr) {{
-{code_indent}    {PREFIX_TENSOR_NAME}{input_name} = paddle::make_optional<const phi::DenseTensor&>(*{PREFIX_TENSOR_NAME}{input_name}_ptr);
-{code_indent}  }}"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
             else:
                 if self.inputs['input_info'][input_name] == "const Tensor&":
@@ -677,7 +662,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
         input_trans_map = {
             'const Tensor&': 'const phi::SelectedRows&',
             'const paddle::optional<Tensor>&':
-            'paddle::optional<const phi::SelectedRows&>'
+            'const paddle::optional<phi::SelectedRows>&'
         }
         out_trans_map = {'Tensor': 'phi::SelectedRows*'}
         input_names = self.inputs['names']
diff --git a/python/paddle/utils/code_gen/type_mapping.py b/python/paddle/utils/code_gen/type_mapping.py
index ecbd1f494c2..c6e110907a9 100644
--- a/python/paddle/utils/code_gen/type_mapping.py
+++ b/python/paddle/utils/code_gen/type_mapping.py
@@ -108,7 +108,7 @@ dense_output_types_map = {
 sr_input_types_map = {'Tensor': 'const phi::SelectedRows&', }
 
 sr_optional_input_types_map = {
-    'Tensor': 'paddle::optional<const phi::SelectedRows&>',
+    'Tensor': 'const paddle::optional<phi::SelectedRows>&',
 }
 sr_output_types_map = {'Tensor': 'phi::SelectedRows*', }
diff --git a/python/paddle/utils/code_gen/wrapped_infermeta_gen.py b/python/paddle/utils/code_gen/wrapped_infermeta_gen.py
index c14d39e9842..bf798f9734d 100644
--- a/python/paddle/utils/code_gen/wrapped_infermeta_gen.py
+++ b/python/paddle/utils/code_gen/wrapped_infermeta_gen.py
@@ -43,10 +43,7 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']}
         'const std::vector<Tensor>&': 'const std::vector<MetaTensor>&',
         'Tensor': 'MetaTensor*',
         'std::vector<Tensor>': 'std::vector<MetaTensor>*',
-        'const paddle::optional<Tensor>':
-        'const paddle::optional<MetaTensor>',
-        'paddle::optional<const Tensor&>':
-        'paddle::optional<const MetaTensor&>'
+        'const paddle::optional<Tensor>&': 'const MetaTensor&'
     }
 
     wrapped_infermeta_name = get_wrapped_infermeta_name(api.api)
--
GitLab