Unverified commit 6d78524c, authored by zyfncg, committed by GitHub

[Phi] Change optional tensor from `optional<const Tensor&>` to `optional<Tensor>` (#42939)

* refactor the optional tensor

* remove optiona<MetaTensor> in InferMeta

* fix bug

* fix optional<vector<Tensor>>

* fix bug

* fix rmsprop

* fix amp of eager_gen

* polish code

* fix deleted code

* fix merge conflict

* polish code

* remove is_nullopt_

* fix merge conflict

* fix merge conflict
Parent: 2d873008
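To make the signature change concrete before reading the diff: a minimal, self-contained sketch of the before/after calling convention, with `std::optional` standing in for `paddle::optional` and a placeholder `Tensor` type (not Paddle's).

```cpp
#include <iostream>
#include <optional>
#include <string>

// Stand-in for paddle::experimental::Tensor (illustration only).
struct Tensor {
  std::string name;
};

// Old style, removed by this PR (optional reference, queried via get_ptr()):
//   void Kernel(paddle::optional<const Tensor&> bias);
// New style: optional value, passed by const reference and queried like a
// pointer.
void Kernel(const std::optional<Tensor>& bias) {
  if (bias) {  // contextual bool conversion replaces bias.get_ptr() != nullptr
    std::cout << "bias: " << bias->name << "\n";  // operator-> replaces get_ptr()->
  } else {
    std::cout << "bias not provided\n";
  }
}

int main() {
  Kernel(Tensor{"b0"});   // engaged optional (implicit conversion)
  Kernel(std::nullopt);   // disengaged optional (was paddle::none)
}
```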
......@@ -345,14 +345,14 @@ AMP_LOGIC_TEMPLATE = \
CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE = \
"""
paddle::optional<const paddle::experimental::Tensor&> {}_optional = paddle::none;
if({}.initialized()) {}_optional = paddle::make_optional<const paddle::experimental::Tensor&>({});
paddle::optional<paddle::experimental::Tensor> {}_optional;
if({}.initialized()) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
"""
CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \
"""
paddle::optional<const paddle::experimental::Tensor&> {}_optional = paddle::none;
if( {}.impl() ) {}_optional = paddle::make_optional<const paddle::experimental::Tensor&>({});
paddle::optional<paddle::experimental::Tensor> {}_optional;
if( {}.impl() ) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
"""
CHECK_BACKWARD_INPLACE_TEMPLATE = \
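A hypothetical expansion of the new `CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE` for an input named `mask`, with minimal stand-ins for the paddle types so the fragment compiles on its own:

```cpp
#include <optional>

// Minimal stand-ins so the expanded fragment compiles (NOT the real paddle
// definitions).
namespace paddle {
template <class T> using optional = std::optional<T>;
template <class T> optional<T> make_optional(const T& v) { return optional<T>(v); }
namespace experimental {
struct Tensor {
  bool initialized() const { return true; }
};
}  // namespace experimental
}  // namespace paddle

int main() {
  paddle::experimental::Tensor mask;
  // Expansion of the new template with {} = mask: a value optional that
  // simply stays empty when the input is uninitialized (no explicit
  // paddle::none needed any more).
  paddle::optional<paddle::experimental::Tensor> mask_optional;
  if (mask.initialized())
    mask_optional = paddle::make_optional<paddle::experimental::Tensor>(mask);
  return mask_optional ? 0 : 1;
}
```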
......@@ -713,7 +713,7 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
if is_fwd_input:
if is_optional:
set_tensor_wrappers = f"{indent}if({name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({name}.get_ptr()));"
set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});"
else:
set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});"
set_input_tensor_wrappers_list.append(set_tensor_wrappers)
......@@ -724,7 +724,7 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
), AssertMessage(name, forward_outputs_position_map.keys())
if is_optional:
set_tensor_wrappers = f"{indent}if({name}.get_ptr() != nullptr) grad_node->SetTensorWrapper{name}(*({name}.get_ptr()));"
set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});"
else:
set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});"
set_output_tensor_wrappers_list.append(set_tensor_wrappers)
......@@ -888,15 +888,12 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
is_optional = (name in optional_inputs)
if IsPlainTensorType(ttype):
if is_optional:
arg_str = f"const paddle::optional<const paddle::experimental::Tensor&> {name}"
arg_str = f"const paddle::optional<paddle::experimental::Tensor>& {name}"
amp_tensors_vector_optional_list.append(
f"if ({name}.get_ptr() != nullptr) amp_tensors_vector.push_back({{ *({name}.get_ptr()) }});\n"
f"if ({name}) amp_tensors_vector.push_back({{ *{name} }});\n"
)
amp_autocast_optional_list.append(
f"auto NEW_{name}_temp_tensor = ({name}.get_ptr() != nullptr) ? egr::EagerAmpAutoCast(\"{name}\", *({name}.get_ptr()), amp_dst_dtype, op_name) : paddle::experimental::Tensor();\n"
)
amp_autocast_optional_list.append(
f"auto NEW_{name} = ({name}.get_ptr() != nullptr) ? paddle::make_optional<const paddle::experimental::Tensor&>(NEW_{name}_temp_tensor) : {name};\n"
f"auto NEW_{name} = egr::EagerAmpAutoCast(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
else:
if is_inplaced and forward_inplace_map and name in forward_inplace_map.keys(
......
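For optional inputs the generator previously emitted a temporary tensor plus a re-wrap into `optional<const Tensor&>`; it now emits a single call into the optional-aware `EagerAmpAutoCast` overload added in the next hunk. A hypothetical expansion for an input named `bias` (stand-in types so the sketch compiles):

```cpp
#include <optional>
#include <string>

// Stand-ins (illustration only).
using Tensor = std::string;
enum class DataType { FLOAT32 };

namespace egr {
std::optional<Tensor> EagerAmpAutoCast(const std::string& name,
                                       const std::optional<Tensor>& input,
                                       DataType dst, const std::string& op) {
  return input;  // the real version casts; identity is enough for the sketch
}
}  // namespace egr

int main() {
  std::optional<Tensor> bias("b");
  DataType amp_dst_dtype = DataType::FLOAT32;
  std::string op_name = "matmul";
  // What the generator now emits for an optional input named `bias`:
  auto NEW_bias = egr::EagerAmpAutoCast("bias", bias, amp_dst_dtype, op_name);
  return NEW_bias ? 0 : 1;
}
```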
......@@ -60,7 +60,8 @@ inline std::vector<paddle::experimental::Tensor> EagerAmpAutoCasts(
inline paddle::experimental::Tensor EagerAmpAutoCast(
const std::string& input_name, const paddle::experimental::Tensor& input,
const paddle::experimental::DataType& dst_dtype, std::string op_name) {
const paddle::experimental::DataType& dst_dtype,
const std::string& op_name) {
VLOG(6) << "AMP AmpAutoCasts:"
<< " input(" << input_name << ") dst_dtype("
<< paddle::framework::DataType2String(dst_dtype) << ").";
......@@ -87,4 +88,15 @@ inline paddle::experimental::Tensor EagerAmpAutoCast(
return input;
}
inline paddle::optional<paddle::experimental::Tensor> EagerAmpAutoCast(
const std::string& input_name,
const paddle::optional<paddle::experimental::Tensor>& input,
const paddle::experimental::DataType& dst_dtype,
const std::string& op_name) {
if (input) {
return EagerAmpAutoCast(input_name, *input, dst_dtype, op_name);
}
return paddle::none;
}
} // namespace egr
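The overload above is an instance of a general "lift over the optional" pattern: forward to the plain-tensor version when the input is engaged, return an empty optional otherwise. A generic sketch of the same pattern, with `std::optional` standing in for `paddle::optional`:

```cpp
#include <optional>
#include <string>

// The plain function over a value...
std::string AmpAutoCast(const std::string& input) { return input + ":casted"; }

// ...lifted over an optional: forward when engaged, stay disengaged otherwise.
// std::nullopt plays the role of paddle::none in the hunk above.
std::optional<std::string> AmpAutoCast(const std::optional<std::string>& input) {
  if (input) {
    return AmpAutoCast(*input);
  }
  return std::nullopt;
}

int main() {
  auto a = AmpAutoCast(std::optional<std::string>("x"));  // engaged: "x:casted"
  auto b = AmpAutoCast(std::optional<std::string>{});     // disengaged
  return (a && !b) ? 0 : 1;
}
```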
......@@ -73,7 +73,7 @@ AutogradMeta* EagerUtils::nullable_autograd_meta(
}
AutogradMeta* EagerUtils::nullable_autograd_meta(
paddle::optional<const paddle::experimental::Tensor&> target) {
const paddle::optional<paddle::experimental::Tensor>& target) {
if (target.get_ptr() != nullptr) {
return EagerUtils::nullable_autograd_meta(*(target.get_ptr()));
}
......
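The retained body still calls `get_ptr()`, which comes from `paddle::optional`'s boost-style interface. Assuming those semantics (a pointer to the contained value, or nullptr when disengaged), the helper amounts to this sketch over `std::optional`:

```cpp
#include <optional>

// Assumed semantics of paddle::optional<T>::get_ptr(), expressed as a free
// function over std::optional: address of the value if engaged, else nullptr.
template <class T>
const T* get_ptr(const std::optional<T>& o) {
  return o ? &*o : nullptr;
}

int main() {
  std::optional<int> x = 7;
  std::optional<int> y;
  return (get_ptr(x) != nullptr && get_ptr(y) == nullptr) ? 0 : 1;
}
```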
......@@ -125,7 +125,7 @@ class EagerUtils {
static AutogradMeta* nullable_autograd_meta(
const paddle::experimental::Tensor& target);
static AutogradMeta* nullable_autograd_meta(
paddle::optional<const paddle::experimental::Tensor&> target);
const paddle::optional<paddle::experimental::Tensor>& target);
static std::vector<AutogradMeta*> nullable_autograd_meta(
const std::vector<paddle::experimental::Tensor>& targets);
static std::vector<AutogradMeta*> nullable_autograd_meta(
......
......@@ -349,14 +349,6 @@ const phi::MetaTensor& CompatInferMetaContext::InputAt(size_t idx) const {
return compat_inputs_.at(idx);
}
paddle::optional<const phi::MetaTensor&>
CompatInferMetaContext::OptionalInputAt(size_t idx) const {
const auto& input = compat_inputs_.at(idx);
return input.initialized()
? paddle::optional<const phi::MetaTensor&>{input}
: paddle::optional<const phi::MetaTensor&>{paddle::none};
}
std::vector<const phi::MetaTensor*> CompatInferMetaContext::InputsBetween(
size_t start, size_t end) const {
std::vector<const phi::MetaTensor*> result;
......@@ -370,7 +362,7 @@ std::vector<const phi::MetaTensor*> CompatInferMetaContext::InputsBetween(
return result;
}
paddle::optional<const std::vector<const phi::MetaTensor*>>
paddle::optional<std::vector<const phi::MetaTensor*>>
CompatInferMetaContext::OptionalInputsBetween(size_t start, size_t end) const {
const auto& first = compat_inputs_.at(start);
......@@ -383,10 +375,10 @@ CompatInferMetaContext::OptionalInputsBetween(size_t start, size_t end) const {
result.emplace_back(in.initialized() ? &in : nullptr);
}
return paddle::optional<const std::vector<const phi::MetaTensor*>>(result);
return paddle::optional<std::vector<const phi::MetaTensor*>>(
std::move(result));
}
return paddle::optional<const std::vector<const phi::MetaTensor*>>(
paddle::none);
return paddle::none;
}
phi::MetaTensor* CompatInferMetaContext::MutableOutputAt(size_t idx) {
......
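`OptionalInputsBetween` keeps a two-level encoding that is easy to misread: the optional as a whole says whether the input list is present at all, while nullptr entries inside the vector mark individual uninitialized tensors. A compilable sketch of that contract with stand-in types:

```cpp
#include <optional>
#include <vector>

struct MetaTensor {
  bool init;
  bool initialized() const { return init; }
};

// Two-level encoding (stand-in types):
// - disengaged optional  -> the whole optional input is absent;
// - nullptr vector entry -> that tensor exists in the list but is uninitialized.
std::optional<std::vector<const MetaTensor*>> OptionalInputsBetween(
    const std::vector<MetaTensor>& inputs, size_t start, size_t end) {
  if (!inputs.at(start).initialized()) return std::nullopt;
  std::vector<const MetaTensor*> result;
  result.reserve(end - start);
  for (size_t i = start; i < end; ++i) {
    const auto& in = inputs.at(i);
    result.emplace_back(in.initialized() ? &in : nullptr);
  }
  return result;  // moves into the returned optional, as in the patch
}

int main() {
  std::vector<MetaTensor> ins{{true}, {false}, {true}};
  auto r = OptionalInputsBetween(ins, 0, 3);
  return (r && (*r)[1] == nullptr) ? 0 : 1;
}
```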
......@@ -59,6 +59,12 @@ class CompatMetaTensor : public phi::MetaTensor {
bool initialized() const override { return initialized_; }
operator unspecified_bool_type() const override {
return initialized_ ? unspecified_bool_true : 0;
}
bool operator!() const override { return !initialized_; }
private:
const LoD& GetRuntimeLoD() const {
auto* var = BOOST_GET_CONST(Variable*, var_);
......@@ -107,13 +113,11 @@ class CompatInferMetaContext : public phi::InferMetaContext {
outputs);
const phi::MetaTensor& InputAt(size_t idx) const override;
paddle::optional<const phi::MetaTensor&> OptionalInputAt(
size_t idx) const override;
std::vector<const phi::MetaTensor*> InputsBetween(size_t start,
size_t end) const override;
paddle::optional<const std::vector<const phi::MetaTensor*>>
OptionalInputsBetween(size_t start, size_t end) const override;
paddle::optional<std::vector<const phi::MetaTensor*>> OptionalInputsBetween(
size_t start, size_t end) const override;
phi::MetaTensor* MutableOutputAt(size_t idx) override;
std::vector<phi::MetaTensor*> MutableOutputBetween(size_t start,
......
......@@ -2370,15 +2370,12 @@ void OperatorWithKernel::BuildPhiKernelContext(
// deal with optional here
if ((it == ctx.inputs.end() || it->second.size() == 0) &&
(input_defs[i].type_index ==
std::type_index(
typeid(paddle::optional<const phi::DenseTensor&>)) ||
std::type_index(typeid(paddle::optional<phi::DenseTensor>)) ||
input_defs[i].type_index ==
std::type_index(
typeid(paddle::optional<const phi::SelectedRows&>)) ||
std::type_index(typeid(paddle::optional<phi::SelectedRows>)) ||
input_defs[i].type_index ==
std::type_index(
typeid(paddle::optional<
const std::vector<const phi::DenseTensor*>>)))) {
std::type_index(typeid(
paddle::optional<std::vector<const phi::DenseTensor*>>)))) {
pt_kernel_context->EmplaceBackInputWithoutSetRange(nullptr);
auto end_idx = start_idx + 1;
pt_kernel_context->AssignInputRange(std::make_pair(start_idx, end_idx),
......
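A missing framework input is accepted only when the kernel declared the parameter as one of the optional types, compared by `std::type_index`; the literal type named in `typeid()` therefore has to change in lock-step with the kernel signatures. A small sketch of this dispatch, with `std::optional` standing in:

```cpp
#include <iostream>
#include <optional>
#include <typeindex>
#include <vector>

struct DenseTensor {};
struct SelectedRows {};

// Sketch: a missing input is legal only if the kernel declared the parameter
// as one of the optional types (value optionals after this PR).
bool AcceptsMissingInput(const std::type_index& arg_type) {
  return arg_type == std::type_index(typeid(std::optional<DenseTensor>)) ||
         arg_type == std::type_index(typeid(std::optional<SelectedRows>)) ||
         arg_type == std::type_index(
                         typeid(std::optional<std::vector<const DenseTensor*>>));
}

int main() {
  std::cout << AcceptsMissingInput(typeid(std::optional<DenseTensor>))  // 1
            << AcceptsMissingInput(typeid(DenseTensor)) << "\n";        // 0
}
```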
......@@ -279,16 +279,14 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
if (it == ins.end()) {
if (LIKELY(input_defs[i].type_index ==
std::type_index(
typeid(paddle::optional<const phi::DenseTensor&>)))) {
std::type_index(typeid(paddle::optional<phi::DenseTensor>)))) {
kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr);
auto end_idx = start_idx + 1;
kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
continue;
} else if (input_defs[i].type_index ==
std::type_index(
typeid(paddle::optional<
const std::vector<const phi::DenseTensor*>>))) {
std::type_index(typeid(
paddle::optional<std::vector<const phi::DenseTensor*>>))) {
kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr);
auto end_idx = start_idx + 1;
kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
......
......@@ -138,7 +138,7 @@ void LayerNorm(const std::vector<LayerNormParamType<T>> &scale,
const platform::CUDADeviceContext &ctx) {
framework::Scope scope;
auto place = ctx.GetPlace();
paddle::optional<const framework::LoDTensor &> scale_opt = paddle::none;
paddle::optional<framework::LoDTensor> scale_opt;
if (scale.size() > 0) {
auto var_scale = scope.Var("Scale");
auto tensor_scale = var_scale->GetMutable<framework::LoDTensor>();
......@@ -147,7 +147,7 @@ void LayerNorm(const std::vector<LayerNormParamType<T>> &scale,
scale_opt = *tensor_scale;
}
paddle::optional<const framework::LoDTensor &> bias_opt = paddle::none;
paddle::optional<framework::LoDTensor> bias_opt;
if (bias.size() > 0) {
auto var_bias = scope.Var("Bias");
auto tensor_bias = var_bias->GetMutable<framework::LoDTensor>();
......
......@@ -292,9 +292,9 @@ class InplaceABNGradKernel : public framework::OpKernel<T> {
auto* mean = ctx.Input<Tensor>("ReserveSpace");
auto* variance = ctx.Input<Tensor>("ReserveSpace");
paddle::optional<const Tensor&> space_opt = paddle::none;
paddle::optional<const Tensor&> mean_opt = paddle::none;
paddle::optional<const Tensor&> variance_opt = paddle::none;
paddle::optional<Tensor> space_opt;
paddle::optional<Tensor> mean_opt;
paddle::optional<Tensor> variance_opt;
if (reserve_space != nullptr) {
space_opt = *reserve_space;
......
......@@ -120,9 +120,9 @@ class InplaceABNGradKernel
auto* mean = ctx.Input<Tensor>("ReserveSpace");
auto* variance = ctx.Input<Tensor>("ReserveSpace");
paddle::optional<const Tensor&> space_opt = paddle::none;
paddle::optional<const Tensor&> mean_opt = paddle::none;
paddle::optional<const Tensor&> variance_opt = paddle::none;
paddle::optional<Tensor> space_opt;
paddle::optional<Tensor> mean_opt;
paddle::optional<Tensor> variance_opt;
if (reserve_space != nullptr) {
space_opt = *reserve_space;
......
......@@ -72,8 +72,7 @@ class DGCMomentumKernel : public framework::OpKernel<T> {
auto* velocity_out = context.Output<framework::Tensor>("VelocityOut");
auto* master_param_out =
context.Output<framework::Tensor>("MasterParamOut");
paddle::optional<const framework::Tensor&> master_param_opt =
paddle::none;
paddle::optional<framework::Tensor> master_param_opt(paddle::none);
float mu = context.Attr<float>("mu");
bool use_nesterov = context.Attr<bool>("use_nesterov");
std::string regularization_method =
......@@ -117,8 +116,7 @@ class DGCMomentumKernel : public framework::OpKernel<T> {
auto* param_out = context.Output<framework::Tensor>("ParamOut");
auto* master_param_out =
context.Output<framework::Tensor>("MasterParamOut");
paddle::optional<const framework::Tensor&> master_param_opt =
paddle::none;
paddle::optional<framework::Tensor> master_param_opt(paddle::none);
if (multi_precision) {
auto* master_param = context.Input<framework::Tensor>("MasterParam");
master_param_opt = *master_param;
......@@ -149,8 +147,7 @@ class DGCMomentumKernel : public framework::OpKernel<T> {
auto* param_out = context.Output<phi::SelectedRows>("ParamOut");
auto* master_param_out =
context.Output<phi::SelectedRows>("MasterParamOut");
paddle::optional<const phi::SelectedRows&> master_param_opt =
paddle::none;
paddle::optional<phi::SelectedRows> master_param_opt(paddle::none);
if (multi_precision) {
auto* master_param = context.Input<phi::SelectedRows>("MasterParam");
master_param_opt = *master_param;
......
......@@ -765,7 +765,7 @@ PyObject* ToPyObject(const std::unordered_map<std::wstring, int>& value) {
// For Final State Dygraph,
// we directly use paddle::optional<Tensor> as the dispensable Tensor.
paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) {
PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
......@@ -784,7 +784,7 @@ paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
}
if (PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(p_tensor_type))) {
return paddle::make_optional<const paddle::experimental::Tensor&>(
return paddle::make_optional<paddle::experimental::Tensor>(
reinterpret_cast<TensorObject*>(obj)->tensor);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
......
......@@ -185,7 +185,7 @@ paddle::Place CastPyArg2Place(PyObject* obj, const std::string& op_type,
paddle::DataType CastPyArg2DataType(PyObject* obj, const std::string& op_type,
ssize_t arg_pos);
paddle::optional<const paddle::experimental::Tensor&> GetOptionalTensorFromArgs(
paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
......
......@@ -41,8 +41,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adam_impl(
const Tensor& moment2,
const Tensor& beta1_pow,
const Tensor& beta2_pow,
paddle::optional<const Tensor&> master_param,
paddle::optional<const Tensor&> skip_update,
const paddle::optional<Tensor>& master_param,
const paddle::optional<Tensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -87,11 +87,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adam_impl(
auto input_moment2 = PrepareData(moment2, kernel.InputAt(4), {});
auto input_beta1_pow = PrepareData(beta1_pow, kernel.InputAt(5), {});
auto input_beta2_pow = PrepareData(beta2_pow, kernel.InputAt(6), {});
paddle::optional<const phi::DenseTensor&> input_master_param(paddle::none);
auto input_master_param_ptr =
PrepareData(master_param, kernel.InputAt(7), {});
paddle::optional<const phi::DenseTensor&> input_skip_update(paddle::none);
auto input_skip_update_ptr = PrepareData(skip_update, kernel.InputAt(8), {});
auto input_master_param = PrepareData(master_param, kernel.InputAt(7), {});
auto input_skip_update = PrepareData(skip_update, kernel.InputAt(8), {});
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> api_output;
auto kernel_out_0 = input_param.get();
......@@ -100,40 +97,13 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adam_impl(
auto kernel_out_3 = input_beta1_pow.get();
auto kernel_out_4 = input_beta2_pow.get();
phi::DenseTensor* kernel_out_5 = nullptr;
if (input_master_param_ptr) {
input_master_param =
paddle::make_optional<const phi::DenseTensor&>(*input_master_param_ptr);
kernel_out_5 =
paddle::make_optional<phi::DenseTensor&>(*input_master_param_ptr)
.get_ptr();
if (input_master_param) {
kernel_out_5 = input_master_param.get_ptr();
}
if (input_skip_update_ptr) {
input_skip_update =
paddle::make_optional<const phi::DenseTensor&>(*input_skip_update_ptr);
}
paddle::optional<const phi::MetaTensor&> input_meta_ref_master_param(
paddle::none);
phi::DenseTensor dt;
phi::MetaTensor input_meta_tmp_master_param(dt);
if (input_master_param_ptr) {
input_meta_tmp_master_param.set_dtype(input_master_param_ptr->dtype());
input_meta_tmp_master_param.set_dims(input_master_param_ptr->dims());
input_meta_tmp_master_param.set_layout(input_master_param_ptr->layout());
input_meta_ref_master_param = input_meta_tmp_master_param;
}
auto input_meta_ref_master_param = MakeMetaTensor(input_master_param);
paddle::optional<const phi::MetaTensor&> input_meta_ref_skip_update(
paddle::none);
phi::DenseTensor dt1;
phi::MetaTensor input_meta_tmp_skip_update(dt1);
if (input_skip_update_ptr) {
input_meta_tmp_skip_update.set_dtype(input_skip_update_ptr->dtype());
input_meta_tmp_skip_update.set_dims(input_skip_update_ptr->dims());
input_meta_tmp_skip_update.set_layout(input_skip_update_ptr->layout());
input_meta_ref_skip_update = input_meta_tmp_skip_update;
}
auto input_meta_ref_skip_update = MakeMetaTensor(input_skip_update);
phi::MetaTensor meta_out_0(kernel_out_0);
phi::MetaTensor meta_out_1(kernel_out_1);
......@@ -176,8 +146,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adam_impl(
const phi::DenseTensor&,
const phi::DenseTensor&,
const phi::DenseTensor&,
paddle::optional<const phi::DenseTensor&>,
paddle::optional<const phi::DenseTensor&>,
const paddle::optional<phi::DenseTensor>&,
const paddle::optional<phi::DenseTensor>&,
const Scalar&,
const Scalar&,
const Scalar&,
......@@ -250,8 +220,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adam_impl(
const phi::DenseTensor&,
const phi::DenseTensor&,
const phi::DenseTensor&,
paddle::optional<const phi::DenseTensor&>,
paddle::optional<const phi::DenseTensor&>,
const paddle::optional<phi::DenseTensor>&,
const paddle::optional<phi::DenseTensor>&,
const Scalar&,
const Scalar&,
const Scalar&,
......@@ -304,8 +274,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adamw_impl(
const Tensor& moment2,
const Tensor& beta1_pow,
const Tensor& beta2_pow,
paddle::optional<const Tensor&> master_param,
paddle::optional<const Tensor&> skip_update,
const paddle::optional<Tensor>& master_param,
const paddle::optional<Tensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -350,11 +320,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adamw_impl(
auto input_moment2 = PrepareData(moment2, kernel.InputAt(4), {});
auto input_beta1_pow = PrepareData(beta1_pow, kernel.InputAt(5), {});
auto input_beta2_pow = PrepareData(beta2_pow, kernel.InputAt(6), {});
paddle::optional<const phi::DenseTensor&> input_master_param(paddle::none);
auto input_master_param_ptr =
PrepareData(master_param, kernel.InputAt(7), {});
paddle::optional<const phi::DenseTensor&> input_skip_update(paddle::none);
auto input_skip_update_ptr = PrepareData(skip_update, kernel.InputAt(8), {});
auto input_master_param = PrepareData(master_param, kernel.InputAt(7), {});
auto input_skip_update = PrepareData(skip_update, kernel.InputAt(8), {});
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> api_output;
auto kernel_out_0 = input_param.get();
......@@ -363,40 +330,13 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adamw_impl(
auto kernel_out_3 = input_beta1_pow.get();
auto kernel_out_4 = input_beta2_pow.get();
phi::DenseTensor* kernel_out_5 = nullptr;
if (input_master_param_ptr) {
input_master_param =
paddle::make_optional<const phi::DenseTensor&>(*input_master_param_ptr);
kernel_out_5 =
paddle::make_optional<phi::DenseTensor&>(*input_master_param_ptr)
.get_ptr();
if (input_master_param) {
kernel_out_5 = input_master_param.get_ptr();
}
if (input_skip_update_ptr) {
input_skip_update =
paddle::make_optional<const phi::DenseTensor&>(*input_skip_update_ptr);
}
paddle::optional<const phi::MetaTensor&> input_meta_ref_master_param(
paddle::none);
phi::DenseTensor dt;
phi::MetaTensor input_meta_tmp_master_param(dt);
if (input_master_param_ptr) {
input_meta_tmp_master_param.set_dtype(input_master_param_ptr->dtype());
input_meta_tmp_master_param.set_dims(input_master_param_ptr->dims());
input_meta_tmp_master_param.set_layout(input_master_param_ptr->layout());
input_meta_ref_master_param = input_meta_tmp_master_param;
}
auto input_meta_ref_master_param = MakeMetaTensor(input_master_param);
paddle::optional<const phi::MetaTensor&> input_meta_ref_skip_update(
paddle::none);
phi::DenseTensor dt1;
phi::MetaTensor input_meta_tmp_skip_update(dt1);
if (input_skip_update_ptr) {
input_meta_tmp_skip_update.set_dtype(input_skip_update_ptr->dtype());
input_meta_tmp_skip_update.set_dims(input_skip_update_ptr->dims());
input_meta_tmp_skip_update.set_layout(input_skip_update_ptr->layout());
input_meta_ref_skip_update = input_meta_tmp_skip_update;
}
auto input_meta_ref_skip_update = MakeMetaTensor(input_skip_update);
phi::MetaTensor meta_out_0(kernel_out_0);
phi::MetaTensor meta_out_1(kernel_out_1);
......@@ -439,8 +379,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adamw_impl(
const phi::DenseTensor&,
const phi::DenseTensor&,
const phi::DenseTensor&,
paddle::optional<const phi::DenseTensor&>,
paddle::optional<const phi::DenseTensor&>,
const paddle::optional<phi::DenseTensor>&,
const paddle::optional<phi::DenseTensor>&,
const Scalar&,
const Scalar&,
const Scalar&,
......@@ -760,7 +700,7 @@ std::tuple<Tensor, Tensor, Tensor> momentum_impl(
const Tensor& grad,
const Tensor& velocity,
const Tensor& learning_rate,
paddle::optional<const Tensor&> master_param,
const paddle::optional<Tensor>& master_param,
float mu,
bool use_nesterov,
const std::string& regularization_method,
......@@ -801,32 +741,18 @@ std::tuple<Tensor, Tensor, Tensor> momentum_impl(
auto input_grad = PrepareData(grad, kernel.InputAt(1), {});
auto input_velocity = PrepareData(velocity, kernel.InputAt(2), {});
auto input_learning_rate = PrepareData(learning_rate, kernel.InputAt(3), {});
paddle::optional<const phi::DenseTensor&> input_master_param(paddle::none);
auto input_master_param_ptr =
PrepareData(master_param, kernel.InputAt(4), {});
auto input_master_param = PrepareData(master_param, kernel.InputAt(4), {});
std::tuple<Tensor, Tensor, Tensor> api_output;
auto kernel_out_0 = input_param.get();
auto kernel_out_1 = input_velocity.get();
phi::DenseTensor* kernel_out_2 = nullptr;
if (input_master_param_ptr) {
input_master_param =
paddle::make_optional<const phi::DenseTensor&>(*input_master_param_ptr);
kernel_out_2 =
paddle::make_optional<phi::DenseTensor&>(*input_master_param_ptr)
.get_ptr();
if (input_master_param) {
kernel_out_2 = input_master_param.get_ptr();
}
paddle::optional<const phi::MetaTensor&> input_meta_ref_master_param(
paddle::none);
phi::DenseTensor dt;
phi::MetaTensor input_meta_tmp_master_param(dt);
if (input_master_param_ptr) {
input_meta_tmp_master_param.set_dtype(input_master_param_ptr->dtype());
input_meta_tmp_master_param.set_dims(input_master_param_ptr->dims());
input_meta_tmp_master_param.set_layout(input_master_param_ptr->layout());
input_meta_ref_master_param = input_meta_tmp_master_param;
}
auto input_meta_ref_master_param = MakeMetaTensor(input_master_param);
phi::MetaTensor meta_out_0(kernel_out_0);
phi::MetaTensor meta_out_1(kernel_out_1);
if (kernel_out_2) {
......@@ -867,7 +793,7 @@ std::tuple<Tensor, Tensor, Tensor> momentum_impl(
const phi::DenseTensor&,
const phi::DenseTensor&,
const phi::DenseTensor&,
paddle::optional<const phi::DenseTensor&>,
const paddle::optional<phi::DenseTensor>&,
float,
bool,
const std::string&,
......@@ -902,7 +828,7 @@ std::tuple<Tensor, Tensor> sgd_impl(
const Tensor& param,
const Tensor& learning_rate,
const Tensor& grad,
paddle::optional<const Tensor&> master_param,
const paddle::optional<Tensor>& master_param,
bool multi_precision) {
DataType kernel_data_type = ParseDataType(param);
auto kernel_key_set = ParseKernelKeyByInputArgs(param, learning_rate, grad);
......@@ -940,17 +866,8 @@ std::tuple<Tensor, Tensor> sgd_impl(
if (phi::DenseTensor::classof(param_tensor.get())) {
auto in_param = PrepareData(param, kernel.InputAt(0), {});
auto in_master_param = PrepareData(master_param, kernel.InputAt(3), {});
paddle::optional<const phi::DenseTensor&> in_master_param_opt =
master_param
? paddle::make_optional<const phi::DenseTensor&>(*in_master_param)
: paddle::none;
auto master_param_meta = MakeMetaTensor(in_master_param_opt);
paddle::optional<const phi::MetaTensor&> master_param_meta_opt =
master_param
? paddle::make_optional<const phi::MetaTensor&>(*master_param_meta)
: paddle::none;
auto in_master_param_opt = PrepareData(master_param, kernel.InputAt(3), {});
auto master_param_meta_opt = MakeMetaTensor(in_master_param_opt);
phi::DenseTensor* kernel_out_0 =
SetKernelOutput(kernel_key.backend(), &std::get<0>(out));
......@@ -974,7 +891,7 @@ std::tuple<Tensor, Tensor> sgd_impl(
const phi::DenseTensor&,
const phi::DenseTensor&,
const phi::DenseTensor&,
paddle::optional<const phi::DenseTensor&>,
const paddle::optional<phi::DenseTensor>&,
bool,
phi::DenseTensor*,
phi::DenseTensor*);
......@@ -1003,7 +920,7 @@ std::tuple<Tensor, Tensor> sgd_impl(
const phi::DenseTensor&,
const phi::DenseTensor&,
const phi::SelectedRows&,
paddle::optional<const phi::DenseTensor&>,
const paddle::optional<phi::DenseTensor>&,
bool,
phi::DenseTensor*,
phi::DenseTensor*);
......@@ -1020,16 +937,8 @@ std::tuple<Tensor, Tensor> sgd_impl(
} else {
auto in_param = TensorToSelectedRows(param);
auto in_grad = TensorToSelectedRows(grad);
auto in_master_param = TensorToSelectedRows(master_param);
auto in_master_param_opt =
master_param
? paddle::make_optional<const phi::SelectedRows&>(*in_master_param)
: paddle::none;
auto in_master_param_opt = TensorToSelectedRows(master_param);
auto master_param_meta = MakeMetaTensor(in_master_param_opt);
paddle::optional<const phi::MetaTensor&> master_param_meta_opt =
master_param
? paddle::make_optional<const phi::MetaTensor&>(*master_param_meta)
: paddle::none;
phi::SelectedRows* kernel_out_0 =
SetSelectedRowsKernelOutput(kernel_key.backend(), &std::get<0>(out));
......@@ -1041,7 +950,7 @@ std::tuple<Tensor, Tensor> sgd_impl(
SgdInferMeta(MakeMetaTensor(*in_param),
MakeMetaTensor(*in_learning_rate),
MakeMetaTensor(*in_grad),
master_param_meta_opt,
master_param_meta,
multi_precision,
&meta_out_0,
&meta_out_1);
......@@ -1051,7 +960,7 @@ std::tuple<Tensor, Tensor> sgd_impl(
const phi::SelectedRows&,
const phi::DenseTensor&,
const phi::SelectedRows&,
paddle::optional<const phi::SelectedRows&>,
const paddle::optional<phi::SelectedRows>&,
bool,
phi::SelectedRows*,
phi::SelectedRows*);
......
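Taken together, the adam/momentum/sgd impl changes above replace the hand-rolled re-wrapping of `PrepareData`'s result with direct use of the optional-aware helpers. A condensed sketch of the new call-site shape (stand-in types; helper names mirror the diff but are simplified):

```cpp
#include <optional>

struct DenseTensor { int dims = 0; };

struct MetaTensor {
  MetaTensor() : t_(nullptr) {}
  explicit MetaTensor(const DenseTensor& t) : t_(&t) {}
  const DenseTensor* t_;
};

// Optional-aware helpers as after this PR (sketches).
std::optional<DenseTensor> PrepareData(const std::optional<DenseTensor>& in) {
  return in;  // the real version may transform layout/dtype
}
MetaTensor MakeMetaTensor(const std::optional<DenseTensor>& in) {
  return in ? MetaTensor(*in) : MetaTensor();
}

int main() {
  std::optional<DenseTensor> master_param(DenseTensor{});
  // New call-site shape: no manual make_optional re-wrapping and no temporary
  // MetaTensor plumbing, as in the rewritten adam_impl/momentum_impl/sgd_impl.
  auto input_master_param = PrepareData(master_param);
  DenseTensor* kernel_out = nullptr;
  if (input_master_param) {
    kernel_out = &*input_master_param;  // stands in for get_ptr()
  }
  auto input_meta_ref_master_param = MakeMetaTensor(input_master_param);
  (void)input_meta_ref_master_param;
  return kernel_out ? 0 : 1;
}
```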
......@@ -39,8 +39,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adam_impl(
const Tensor& moment2,
const Tensor& beta1_pow,
const Tensor& beta2_pow,
paddle::optional<const Tensor&> master_param,
paddle::optional<const Tensor&> skip_update,
const paddle::optional<Tensor>& master_param,
const paddle::optional<Tensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -57,8 +57,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adamw_impl(
const Tensor& moment2,
const Tensor& beta1_pow,
const Tensor& beta2_pow,
paddle::optional<const Tensor&> master_param,
paddle::optional<const Tensor&> skip_update,
const paddle::optional<Tensor>& master_param,
const paddle::optional<Tensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -107,7 +107,7 @@ std::tuple<Tensor, Tensor, Tensor> momentum_impl(
const Tensor& grad,
const Tensor& velocity,
const Tensor& learning_rate,
paddle::optional<const Tensor&> master_param,
const paddle::optional<Tensor>& master_param,
float mu,
bool use_nesterov,
const std::string& regularization_method,
......@@ -119,7 +119,7 @@ std::tuple<Tensor, Tensor> sgd_impl(
const Tensor& param,
const Tensor& learning_rate,
const Tensor& grad,
paddle::optional<const Tensor&> master_param,
const paddle::optional<Tensor>& master_param,
bool multi_precision);
////////////////// Backward(grad) api impls //////////////////////
......
......@@ -23,10 +23,10 @@ std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor) {
return std::static_pointer_cast<phi::DenseTensor>(tensor.impl());
}
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(
const paddle::optional<const Tensor&>& tensor) {
paddle::optional<phi::DenseTensor> TensorToDenseTensor(
const paddle::optional<Tensor>& tensor) {
if (tensor) {
return std::static_pointer_cast<phi::DenseTensor>(tensor->impl());
return {*std::static_pointer_cast<phi::DenseTensor>(tensor->impl())};
}
return nullptr;
}
......@@ -48,10 +48,10 @@ std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor) {
return std::static_pointer_cast<phi::SelectedRows>(tensor.impl());
}
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(
const paddle::optional<const Tensor&>& tensor) {
paddle::optional<phi::SelectedRows> TensorToSelectedRows(
const paddle::optional<Tensor>& tensor) {
if (tensor) {
return std::static_pointer_cast<phi::SelectedRows>(tensor->impl());
return {*std::static_pointer_cast<phi::SelectedRows>(tensor->impl())};
}
return nullptr;
}
......@@ -66,12 +66,12 @@ phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor) {
return phi::MetaTensor(tensor);
}
paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::DenseTensor&>& tensor) {
phi::MetaTensor MakeMetaTensor(
const paddle::optional<phi::DenseTensor>& tensor) {
if (tensor) {
return {phi::MetaTensor(*tensor)};
}
return {paddle::none};
return phi::MetaTensor();
}
std::vector<phi::MetaTensor> MakeMetaTensor(
......@@ -98,12 +98,12 @@ phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor) {
return phi::MetaTensor(tensor);
}
paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::SelectedRows&>& tensor) {
phi::MetaTensor MakeMetaTensor(
const paddle::optional<phi::SelectedRows>& tensor) {
if (tensor) {
return {phi::MetaTensor(*tensor)};
}
return {paddle::none};
return phi::MetaTensor();
}
phi::MetaTensor MakeMetaTensor(const phi::StringTensor& tensor) {
......
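The `MakeMetaTensor` overloads above remove a level of nesting: instead of returning `optional<MetaTensor>`, absence is now encoded inside `MetaTensor` itself as a null underlying tensor, which the new default constructor (see the meta_tensor.h hunk later in this diff) provides. A sketch with stand-in types:

```cpp
#include <optional>

struct DenseTensor {};

class MetaTensor {
 public:
  MetaTensor() : tensor_(nullptr) {}                        // "absent" state
  explicit MetaTensor(const DenseTensor& t) : tensor_(&t) {}
  explicit operator bool() const { return tensor_ != nullptr; }
 private:
  const DenseTensor* tensor_;
};

// New contract: never optional<MetaTensor>; a disengaged input collapses to a
// MetaTensor wrapping nullptr. (Stand-in types; the real MetaTensor stores a
// TensorBase* and uses the safe-bool idiom instead of explicit operator bool.)
MetaTensor MakeMetaTensor(const std::optional<DenseTensor>& tensor) {
  if (tensor) {
    return MetaTensor(*tensor);
  }
  return MetaTensor();
}

int main() {
  std::optional<DenseTensor> missing;
  return MakeMetaTensor(missing) ? 1 : 0;  // 0: null MetaTensor
}
```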
......@@ -32,7 +32,7 @@ enum class TensorType { DENSE_TENSOR, SPARSE_CSR, SPARSE_COO, STRING_TENSOR };
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor);
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(
paddle::optional<phi::DenseTensor> TensorToDenseTensor(
const paddle::optional<Tensor>& tensor);
std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
......@@ -40,8 +40,8 @@ std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor);
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(
const paddle::optional<const Tensor&>& tensor);
paddle::optional<phi::SelectedRows> TensorToSelectedRows(
const paddle::optional<Tensor>& tensor);
std::shared_ptr<phi::StringTensor> TensorToStringTensor(const Tensor& tensor);
......@@ -49,8 +49,8 @@ std::shared_ptr<phi::StringTensor> TensorToStringTensor(const Tensor& tensor);
phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor);
paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::DenseTensor&>& tensor);
phi::MetaTensor MakeMetaTensor(
const paddle::optional<phi::DenseTensor>& tensor);
std::vector<phi::MetaTensor> MakeMetaTensor(
const std::vector<const phi::DenseTensor*>& tensors);
......@@ -60,8 +60,8 @@ std::vector<phi::MetaTensor> MakeMetaTensor(
phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor);
paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::SelectedRows&>& tensor);
phi::MetaTensor MakeMetaTensor(
const paddle::optional<phi::SelectedRows>& tensor);
phi::MetaTensor MakeMetaTensor(const phi::StringTensor& tensor);
......
......@@ -249,25 +249,14 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
return nullptr;
}
std::shared_ptr<phi::DenseTensor> PrepareData(
paddle::optional<phi::DenseTensor> PrepareData(
const paddle::optional<Tensor>& input,
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag) {
if (input) {
return PrepareData(*input, target_args_def, transform_flag);
}
return {nullptr};
}
std::shared_ptr<phi::DenseTensor> PrepareData(
const paddle::optional<const Tensor&> input,
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag) {
if (input.get_ptr() != nullptr) {
return PrepareData(*(input.get_ptr()), target_args_def, transform_flag);
return {*PrepareData(*input, target_args_def, transform_flag)};
}
return {nullptr};
return paddle::none;
}
std::unique_ptr<std::vector<phi::DenseTensor>> PrepareData(
......
......@@ -66,7 +66,7 @@ std::shared_ptr<phi::DenseTensor> PrepareData(
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag);
std::shared_ptr<phi::DenseTensor> PrepareData(
paddle::optional<phi::DenseTensor> PrepareData(
const paddle::optional<Tensor>& input,
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag);
......@@ -76,10 +76,5 @@ std::unique_ptr<std::vector<phi::DenseTensor>> PrepareData(
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag);
std::shared_ptr<phi::DenseTensor> PrepareData(
const paddle::optional<const Tensor&> input,
const phi::TensorArgDef& target_args_def,
const TransformFlag& transform_flag);
} // namespace experimental
} // namespace paddle
......@@ -125,8 +125,8 @@ struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
key_set.dtype = tensor.dtype();
}
void operator()(const paddle::optional<const Tensor&> x) {
if (x.get_ptr() != nullptr) {
void operator()(const paddle::optional<Tensor>& x) {
if (x) {
const phi::TensorBase& tensor = *(x.get_ptr()->impl());
AssignKernelKeySet(tensor);
}
......
......@@ -65,14 +65,6 @@ const MetaTensor& InferMetaContext::InputAt(size_t idx) const {
return inputs_.at(idx);
}
paddle::optional<const MetaTensor&> InferMetaContext::OptionalInputAt(
size_t idx) const {
const auto& input = inputs_.at(idx);
return input.initialized()
? paddle::optional<const MetaTensor&>{input}
: paddle::optional<const MetaTensor&>{paddle::none};
}
std::vector<const MetaTensor*> InferMetaContext::InputsBetween(
size_t start, size_t end) const {
std::vector<const MetaTensor*> result;
......@@ -86,7 +78,7 @@ std::vector<const MetaTensor*> InferMetaContext::InputsBetween(
return result;
}
paddle::optional<const std::vector<const MetaTensor*>>
paddle::optional<std::vector<const MetaTensor*>>
InferMetaContext::OptionalInputsBetween(size_t start, size_t end) const {
const auto& first = inputs_.at(start);
......@@ -99,9 +91,9 @@ InferMetaContext::OptionalInputsBetween(size_t start, size_t end) const {
result.emplace_back(in.initialized() ? &in : nullptr);
}
return paddle::optional<const std::vector<const MetaTensor*>>(result);
return paddle::optional<std::vector<const MetaTensor*>>(std::move(result));
}
return paddle::optional<const std::vector<const MetaTensor*>>(paddle::none);
return paddle::none;
}
MetaTensor* InferMetaContext::MutableOutputAt(size_t idx) {
......
......@@ -50,11 +50,10 @@ class InferMetaContext {
paddle::small_vector<MetaTensor, phi::kOutputSmallVectorSize> outputs);
virtual const MetaTensor& InputAt(size_t idx) const;
virtual paddle::optional<const MetaTensor&> OptionalInputAt(size_t idx) const;
virtual std::vector<const MetaTensor*> InputsBetween(size_t start,
size_t end) const;
virtual paddle::optional<const std::vector<const MetaTensor*>>
virtual paddle::optional<std::vector<const MetaTensor*>>
OptionalInputsBetween(size_t start, size_t end) const;
virtual MetaTensor* MutableOutputAt(size_t idx);
......@@ -151,24 +150,6 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
}
};
template <typename... Tail>
struct InferMetaFnCallHelper<paddle::optional<const MetaTensor&>, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) {
static_assert(attr_idx == 0,
"InferMeta's Input should appear before Attributes.");
static_assert(out_idx == 0,
"InferMeta's Input should appear before Outputs.");
const std::pair<int, int> range = ctx->InputRangeAt(in_idx);
auto arg = ctx->OptionalInputAt(range.first);
InferMetaFnCallHelper<
Tail...>::template Call<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
template <typename... Tail>
struct InferMetaFnCallHelper<const std::vector<const MetaTensor*>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
......@@ -189,7 +170,7 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
template <typename... Tail>
struct InferMetaFnCallHelper<
paddle::optional<const std::vector<const MetaTensor*>>,
const paddle::optional<std::vector<const MetaTensor*>>&,
Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) {
......@@ -198,7 +179,7 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
static_assert(out_idx == 0,
"InferMeta's Input should appear before Outputs.");
const std::pair<int, int> range = ctx->InputRangeAt(in_idx);
paddle::optional<const std::vector<const MetaTensor*>> arg =
paddle::optional<std::vector<const MetaTensor*>> arg =
ctx->OptionalInputsBetween(range.first, range.second);
InferMetaFnCallHelper<
Tail...>::template Call<in_idx + 1, attr_idx, out_idx>(ctx,
......
......@@ -81,11 +81,11 @@ class KernelContext {
}
template <typename TensorType>
paddle::optional<const TensorType&> OptionalInputAt(size_t idx) const {
const auto& input = inputs_.at(idx);
return input ? paddle::optional<const TensorType&>{static_cast<
const TensorType&>(*input)}
: paddle::optional<const TensorType&>{paddle::none};
paddle::optional<TensorType> OptionalInputAt(size_t idx) const {
const auto* input = inputs_.at(idx);
return input ? paddle::make_optional<TensorType>(
*(static_cast<const TensorType*>(input)))
: paddle::none;
}
template <typename TensorType>
......@@ -99,7 +99,7 @@ class KernelContext {
}
template <typename TensorType>
paddle::optional<const std::vector<const TensorType*>> OptionalInputsBetween(
paddle::optional<std::vector<const TensorType*>> OptionalInputsBetween(
size_t start, size_t end) {
const auto& first = inputs_.at(start);
......@@ -109,9 +109,9 @@ class KernelContext {
auto* t = static_cast<const TensorType*>(inputs_.at(i));
v.emplace_back(t);
}
return paddle::optional<const std::vector<const TensorType*>>(v);
return paddle::optional<std::vector<const TensorType*>>(std::move(v));
}
return paddle::optional<const std::vector<const TensorType*>>(paddle::none);
return paddle::none;
}
template <typename TensorType>
......
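The rewritten `OptionalInputAt` turns a type-erased stored pointer into a value optional; note that this copies the tensor object into the optional, where the removed version aliased it through `optional<const TensorType&>`. A sketch, assuming inputs are stored as `const TensorBase*`:

```cpp
#include <optional>

struct TensorBase {
  virtual ~TensorBase() = default;
};
struct DenseTensor : TensorBase {
  int id = 0;
};

// Sketch of the new OptionalInputAt: a nullptr slot becomes a disengaged
// optional; otherwise the stored base pointer is downcast and the tensor is
// copied into the optional.
template <typename TensorType>
std::optional<TensorType> OptionalInputAt(const TensorBase* input) {
  return input ? std::make_optional<TensorType>(
                     *static_cast<const TensorType*>(input))
               : std::nullopt;
}

int main() {
  DenseTensor t;
  t.id = 3;
  auto some = OptionalInputAt<DenseTensor>(&t);
  auto none = OptionalInputAt<DenseTensor>(nullptr);
  return (some && some->id == 3 && !none) ? 0 : 1;
}
```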
......@@ -76,20 +76,20 @@ struct KernelArgsParseFunctor<Return_ (*)(Args_...)> {
default_key.dtype(),
arg_type);
} else if (arg_type == std::type_index(typeid(
paddle::optional<const DenseTensor&>))) {
const paddle::optional<DenseTensor>&))) {
args_def->AppendInput(default_key.backend(),
default_tensor_layout,
default_key.dtype(),
arg_type);
} else if (arg_type == std::type_index(typeid(
paddle::optional<
const std::vector<const DenseTensor*>>))) {
} else if (arg_type ==
std::type_index(typeid(const paddle::optional<
std::vector<const DenseTensor*>>&))) {
args_def->AppendInput(default_key.backend(),
default_tensor_layout,
default_key.dtype(),
arg_type);
} else if (arg_type == std::type_index(typeid(
paddle::optional<const SelectedRows&>))) {
const paddle::optional<SelectedRows>&))) {
args_def->AppendInput(default_key.backend(),
default_tensor_layout,
default_key.dtype(),
......
......@@ -85,7 +85,7 @@ namespace phi {
#define PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper<paddle::optional<const tensor_type&>, Tail...> { \
struct KernelCallHelper<const paddle::optional<tensor_type>&, Tail...> { \
template <int dev_ctx_idx, \
int in_idx, \
int attr_idx, \
......@@ -129,7 +129,7 @@ namespace phi {
#define PD_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_MULTI_INPUT(tensor_type) \
template <typename... Tail> \
struct KernelCallHelper< \
paddle::optional<const std::vector<const tensor_type*>>, \
const paddle::optional<std::vector<const tensor_type*>>&, \
Tail...> { \
template <int dev_ctx_idx, \
int in_idx, \
......@@ -142,7 +142,7 @@ namespace phi {
static_assert(out_idx == 0, \
"Kernel's Input should appear before Outputs."); \
const std::pair<int, int>& range = ctx->InputRangeAt(in_idx); \
paddle::optional<const std::vector<const tensor_type*>> arg = \
paddle::optional<std::vector<const tensor_type*>> arg = \
ctx->OptionalInputsBetween<tensor_type>(range.first, range.second); \
KernelCallHelper<Tail...>:: \
template Compute<dev_ctx_idx, in_idx + 1, attr_idx, out_idx>( \
......
......@@ -39,7 +39,9 @@ struct MetaConfig {
class MetaTensor {
public:
MetaTensor() = default;
typedef void (*unspecified_bool_type)();
MetaTensor() : tensor_(nullptr) {}
// Implicit construction is supported to make the class easier to use.
MetaTensor(TensorBase* tensor) : tensor_(tensor) {} // NOLINT
......@@ -68,12 +70,22 @@ class MetaTensor {
virtual bool initialized() const;
virtual operator unspecified_bool_type() const {
return tensor_ == nullptr ? 0 : unspecified_bool_true;
}
virtual bool operator!() const { return tensor_ == nullptr; }
protected:
static void unspecified_bool_true() {}
private:
// Because the LoD differs between compile time and runtime,
// `LoD` cannot be exposed in public methods.
const LoD& lod() const;
TensorBase* tensor() const;
TensorBase* tensor_;
TensorBase* tensor_ = nullptr;
};
} // namespace phi
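`MetaTensor` adopts the classic safe-bool idiom rather than an explicit `operator bool`: conversion to an obscure pointer-to-function type makes `if (t)` and `!t` work while ruling out accidental arithmetic on the result. A standalone sketch of the idiom as used here:

```cpp
// Safe-bool idiom as used by MetaTensor: usable in boolean contexts, but not
// implicitly convertible to int. (Sketch; names mirror the patch above.)
class Handle {
 public:
  typedef void (*unspecified_bool_type)();

  explicit Handle(const void* p = nullptr) : p_(p) {}

  operator unspecified_bool_type() const {
    return p_ ? unspecified_bool_true : nullptr;
  }
  bool operator!() const { return p_ == nullptr; }

 private:
  static void unspecified_bool_true() {}
  const void* p_;
};

int main() {
  Handle empty;
  int x = 1;
  Handle full(&x);
  // if (empty + 1) {}  // would not compile: no arithmetic on the result
  return (!empty && full) ? 0 : 1;
}
```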
......@@ -188,7 +188,7 @@ void CrossEntropyWithSoftmaxGradInferMeta(const MetaTensor& label,
void DeformableConvGradInferMeta(const MetaTensor& x,
const MetaTensor& offset,
const MetaTensor& filter,
paddle::optional<const MetaTensor&> mask,
const MetaTensor& mask,
const MetaTensor& out_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
......@@ -202,7 +202,7 @@ void DeformableConvGradInferMeta(const MetaTensor& x,
MetaTensor* mask_grad) {
GeneralTernaryGradInferMeta(x, offset, filter, dx, offset_grad, filter_grad);
if (mask) {
UnchangedInferMeta(mask.get(), mask_grad);
UnchangedInferMeta(mask, mask_grad);
}
}
......@@ -314,7 +314,7 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
void InstanceNormGradInferMeta(const MetaTensor& x,
const MetaTensor& y_grad,
paddle::optional<const MetaTensor&> scale,
const MetaTensor& scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
float epsilon,
......@@ -338,19 +338,18 @@ void InstanceNormGradInferMeta(const MetaTensor& x,
bias_grad->set_dims({C});
}
}
void InstanceNormDoubleGradInferMeta(
const MetaTensor& x,
paddle::optional<const MetaTensor&> scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
const MetaTensor& dy,
paddle::optional<const MetaTensor&> ddx,
paddle::optional<const MetaTensor&> ddscale,
paddle::optional<const MetaTensor&> ddbias,
float epsilon,
MetaTensor* dx,
MetaTensor* dscale,
MetaTensor* ddy) {
void InstanceNormDoubleGradInferMeta(const MetaTensor& x,
const MetaTensor& scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
const MetaTensor& dy,
const MetaTensor& ddx,
const MetaTensor& ddscale,
const MetaTensor& ddbias,
float epsilon,
MetaTensor* dx,
MetaTensor* dscale,
MetaTensor* ddy) {
PADDLE_ENFORCE_NE(
dx,
nullptr,
......@@ -436,7 +435,7 @@ void MultiplexGradInferMeta(const MetaTensor& ids,
void NllLossGradInferMeta(const MetaTensor& x,
const MetaTensor& label,
paddle::optional<const MetaTensor&> weight,
const MetaTensor& weight,
const MetaTensor& total_weight,
const MetaTensor& out_grad,
int64_t ignore_index,
......@@ -549,7 +548,7 @@ void PoolGradInferMeta(const MetaTensor& x,
void PsroiPoolGradInferMeta(const MetaTensor& x,
const MetaTensor& rois,
paddle::optional<const MetaTensor&> rois_num,
const MetaTensor& rois_num,
const MetaTensor& dout,
int pooled_height,
int pooled_width,
......
......@@ -87,7 +87,7 @@ void CrossEntropyWithSoftmaxGradInferMeta(const MetaTensor& label,
void DeformableConvGradInferMeta(const MetaTensor& x,
const MetaTensor& offset,
const MetaTensor& filter,
paddle::optional<const MetaTensor&> mask,
const MetaTensor& mask,
const MetaTensor& out_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
......@@ -146,7 +146,7 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
void InstanceNormGradInferMeta(const MetaTensor& x,
const MetaTensor& y_grad,
paddle::optional<const MetaTensor&> scale,
const MetaTensor& scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
float epsilon,
......@@ -154,19 +154,18 @@ void InstanceNormGradInferMeta(const MetaTensor& x,
MetaTensor* scale_grad,
MetaTensor* bias_grad);
void InstanceNormDoubleGradInferMeta(
const MetaTensor& x,
paddle::optional<const MetaTensor&> scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
const MetaTensor& dy,
paddle::optional<const MetaTensor&> ddx,
paddle::optional<const MetaTensor&> ddscale,
paddle::optional<const MetaTensor&> ddbias,
float epsilon,
MetaTensor* dx,
MetaTensor* dscale,
MetaTensor* ddy);
void InstanceNormDoubleGradInferMeta(const MetaTensor& x,
const MetaTensor& scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
const MetaTensor& dy,
const MetaTensor& ddx,
const MetaTensor& ddscale,
const MetaTensor& ddbias,
float epsilon,
MetaTensor* dx,
MetaTensor* dscale,
MetaTensor* ddy);
void KernelWithXShapeInferMeta(const MetaTensor& xshape, MetaTensor* dx);
......@@ -194,7 +193,7 @@ void MultiplexGradInferMeta(const MetaTensor& ids,
void NllLossGradInferMeta(const MetaTensor& input,
const MetaTensor& label,
paddle::optional<const MetaTensor&> weight,
const MetaTensor& weight,
const MetaTensor& total_weight,
const MetaTensor& out_grad,
int64_t ignore_index,
......@@ -209,7 +208,7 @@ void PixelUnshuffleGradInferMeta(const MetaTensor& out_grad,
void PsroiPoolGradInferMeta(const MetaTensor& x,
const MetaTensor& rois,
paddle::optional<const MetaTensor&> rois_num,
const MetaTensor& rois_num,
const MetaTensor& dout,
int pooled_height,
int pooled_width,
......
......@@ -201,7 +201,7 @@ void BCELossInferMeta(const MetaTensor& input,
}
void BincountInferMeta(const MetaTensor& x,
const paddle::optional<const MetaTensor&> weights,
const MetaTensor& weights,
int minlength,
MetaTensor* out) {
auto input_dim = x.dims();
......@@ -220,8 +220,10 @@ void BincountInferMeta(const MetaTensor& x,
"But the dimension of Input(X) is [%d]",
input_dim.size()));
if (weights.is_initialized()) {
auto weights_dim = weights->dims();
VLOG(1) << "####### CHECK weights";
if (weights) {
auto weights_dim = weights.dims();
VLOG(1) << "##### weights_dim " << weights_dim;
PADDLE_ENFORCE_EQ(weights_dim.size(),
1,
phi::errors::InvalidArgument(
......@@ -241,8 +243,8 @@ void BincountInferMeta(const MetaTensor& x,
input_dim));
}
out->set_dims(phi::make_ddim({-1}));
if (weights.is_initialized()) {
out->set_dtype(weights->dtype());
if (weights) {
out->set_dtype(weights.dtype());
} else {
out->set_dtype(x.dtype());
}
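The Bincount change illustrates the InferMeta-wide migration pattern: an `optional<const MetaTensor&>` parameter becomes a plain `const MetaTensor&` that may wrap a null tensor, so presence checks move from `is_initialized()` to the tensor's own boolean conversion, and `->` accesses become plain member accesses. A condensed sketch (stand-in `MetaTensor` using `explicit operator bool` instead of the safe-bool idiom):

```cpp
struct DDim { int size = 1; };

// Stand-in MetaTensor exposing just what the migration touches.
struct MetaTensor {
  const void* tensor_ = nullptr;
  explicit operator bool() const { return tensor_ != nullptr; }
  DDim dims() const { return DDim{}; }
};

void BincountLikeInferMeta(const MetaTensor& weights) {
  // Before: if (weights.is_initialized()) { auto d = weights->dims(); ... }
  // After: the MetaTensor itself answers the presence question.
  if (weights) {
    DDim weights_dim = weights.dims();  // plain member access, no operator->
    (void)weights_dim;
  }
}

int main() {
  BincountLikeInferMeta(MetaTensor{});  // absent input: body is skipped
}
```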
......@@ -864,7 +866,7 @@ void DistInferMeta(const MetaTensor& x,
}
void DropoutInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> seed_tensor,
const MetaTensor& seed_tensor,
float p,
bool is_test,
const std::string& mode,
......@@ -982,7 +984,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
}
void ExpandAsInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> y,
const MetaTensor& y,
const std::vector<int>& target_shape,
MetaTensor* out) {
#define MAX_RANK_SUPPORTED 6
......
......@@ -56,7 +56,7 @@ void BCELossInferMeta(const MetaTensor& input,
MetaConfig config = MetaConfig());
void BincountInferMeta(const MetaTensor& x,
const paddle::optional<const MetaTensor&> weights,
const MetaTensor& weights,
int minlength,
MetaTensor* out);
......@@ -136,7 +136,7 @@ void DistInferMeta(const MetaTensor& x,
void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
void DropoutInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> seed_tensor,
const MetaTensor& seed_tensor,
float p,
bool is_test,
const std::string& mode,
......@@ -155,7 +155,7 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
MetaTensor* out);
void ExpandAsInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> y,
const MetaTensor& y,
const std::vector<int>& target_shape,
MetaTensor* out);
......
......@@ -100,8 +100,8 @@ void AdamInferMeta(const MetaTensor& param,
const MetaTensor& moment2,
const MetaTensor& beta1_pow,
const MetaTensor& beta2_pow,
paddle::optional<const MetaTensor&> master_param,
paddle::optional<const MetaTensor&> skip_update,
const MetaTensor& master_param,
const MetaTensor& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -238,8 +238,8 @@ void AdamwInferMeta(const MetaTensor& param,
const MetaTensor& moment2,
const MetaTensor& beta1_pow,
const MetaTensor& beta2_pow,
paddle::optional<const MetaTensor&> master_param,
paddle::optional<const MetaTensor&> skip_update,
const MetaTensor& master_param,
const MetaTensor& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -580,7 +580,7 @@ void BatchNormInferInferMeta(const MetaTensor& x,
void BilinearTensorProductInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& bias,
MetaTensor* out,
MetaConfig config) {
auto x_dims = x.dims();
......@@ -619,8 +619,8 @@ void BilinearTensorProductInferMeta(const MetaTensor& x,
"The second dimension of input(Y) must be equal to "
"the third dimension of the input(Weight)."));
if (bias.get_ptr()) {
auto bias_dims = bias->dims();
if (bias) {
auto bias_dims = bias.dims();
PADDLE_ENFORCE_EQ(bias_dims.size(),
2UL,
errors::InvalidArgument(
......@@ -772,7 +772,7 @@ inline int ConvOutputSize(
void DeformableConvInferMeta(const MetaTensor& x,
const MetaTensor& offset,
const MetaTensor& filter,
paddle::optional<const MetaTensor&> mask,
const MetaTensor& mask,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
......@@ -918,7 +918,7 @@ void DeformableConvInferMeta(const MetaTensor& x,
deformable_groups));
if (mask) {
auto mask_dims = mask->dims();
auto mask_dims = mask.dims();
PADDLE_ENFORCE_EQ(output_shape[2],
mask_dims[2],
phi::errors::InvalidArgument(
......@@ -958,9 +958,9 @@ void DeformableConvInferMeta(const MetaTensor& x,
void HierarchicalSigmoidInferMeta(const MetaTensor& x,
const MetaTensor& w,
const MetaTensor& label,
paddle::optional<const MetaTensor&> path,
paddle::optional<const MetaTensor&> code,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& path,
const MetaTensor& code,
const MetaTensor& bias,
int num_classes,
bool remote_prefetch,
int trainer_id,
......@@ -991,9 +991,9 @@ void HierarchicalSigmoidInferMeta(const MetaTensor& x,
static void Interpolate1DInferShapeCheck(
const MetaTensor& x,
paddle::optional<const MetaTensor&> out_size,
paddle::optional<const std::vector<const MetaTensor*>> size_tensor,
paddle::optional<const MetaTensor&> scale_tensor,
const MetaTensor& out_size,
const paddle::optional<std::vector<const MetaTensor*>>& size_tensor,
const MetaTensor& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
......@@ -1048,7 +1048,7 @@ static void Interpolate1DInferShapeCheck(
int out_w_tmp;
if (scale_tensor) {
auto scale_tensor_dim = scale_tensor->dims();
auto scale_tensor_dim = scale_tensor.dims();
PADDLE_ENFORCE_EQ(
scale_tensor_dim.size(),
1,
......@@ -1086,7 +1086,7 @@ static void Interpolate1DInferShapeCheck(
}
if (out_size && config.is_runtime) {
auto out_size_dim = out_size->dims();
auto out_size_dim = out_size.dims();
PADDLE_ENFORCE_EQ(
out_size_dim.size(),
1,
......@@ -1118,9 +1118,9 @@ static void Interpolate1DInferShapeCheck(
static void Interpolate2DInferShapeCheck(
const MetaTensor& x,
paddle::optional<const MetaTensor&> out_size,
paddle::optional<const std::vector<const MetaTensor*>> size_tensor,
paddle::optional<const MetaTensor&> scale_tensor,
const MetaTensor& out_size,
const paddle::optional<std::vector<const MetaTensor*>>& size_tensor,
const MetaTensor& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
......@@ -1178,7 +1178,7 @@ static void Interpolate2DInferShapeCheck(
int out_h_tmp, out_w_tmp;
if (scale_tensor) {
auto scale_tensor_dim = scale_tensor->dims();
auto scale_tensor_dim = scale_tensor.dims();
PADDLE_ENFORCE_EQ(
scale_tensor_dim.size(),
1,
......@@ -1231,7 +1231,7 @@ static void Interpolate2DInferShapeCheck(
}
if (out_size && config.is_runtime) {
auto out_size_dim = out_size->dims();
auto out_size_dim = out_size.dims();
PADDLE_ENFORCE_EQ(
out_size_dim.size(),
1,
......@@ -1263,9 +1263,9 @@ static void Interpolate2DInferShapeCheck(
static void Interpolate3DInferShapeCheck(
const MetaTensor& x,
paddle::optional<const MetaTensor&> out_size,
paddle::optional<const std::vector<const MetaTensor*>> size_tensor,
paddle::optional<const MetaTensor&> scale_tensor,
const MetaTensor& out_size,
const paddle::optional<std::vector<const MetaTensor*>>& size_tensor,
const MetaTensor& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
......@@ -1321,7 +1321,7 @@ static void Interpolate3DInferShapeCheck(
int out_d_tmp, out_h_tmp, out_w_tmp;
if (scale_tensor) {
auto scale_tensor_dim = scale_tensor->dims();
auto scale_tensor_dim = scale_tensor.dims();
PADDLE_ENFORCE_EQ(
scale_tensor_dim.size(),
1,
......@@ -1389,7 +1389,7 @@ static void Interpolate3DInferShapeCheck(
}
if (out_size && config.is_runtime) {
auto out_size_dim = out_size->dims();
auto out_size_dim = out_size.dims();
PADDLE_ENFORCE_EQ(
out_size_dim.size(),
1,
......@@ -1419,9 +1419,9 @@ static void Interpolate3DInferShapeCheck(
void InterpolateInferMeta(
const MetaTensor& x,
paddle::optional<const MetaTensor&> out_size,
paddle::optional<const std::vector<const MetaTensor*>> size_tensor,
paddle::optional<const MetaTensor&> scale_tensor,
const MetaTensor& out_size,
const paddle::optional<std::vector<const MetaTensor*>>& size_tensor,
const MetaTensor& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
......@@ -1546,7 +1546,7 @@ void MomentumInferMeta(const MetaTensor& param,
const MetaTensor& grad,
const MetaTensor& velocity,
const MetaTensor& learning_rate,
paddle::optional<const MetaTensor&> master_param,
const MetaTensor& master_param,
float mu,
bool use_nesterov,
const std::string& regularization_method,
......@@ -1709,7 +1709,7 @@ void MultiplexInferMeta(const std::vector<const MetaTensor*>& ins,
void PsroiPoolInferMeta(const MetaTensor& x,
const MetaTensor& rois,
paddle::optional<const MetaTensor&> rois_num,
const MetaTensor& rois_num,
int pooled_height,
int pooled_width,
int output_channels,
......@@ -1732,8 +1732,8 @@ void PsroiPoolInferMeta(const MetaTensor& x,
errors::InvalidArgument(
"ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [(x1, y1, x2, y2), ...]"));
if (rois_num.get_ptr()) {
auto rois_num_dims = rois_num->dims();
if (rois_num) {
auto rois_num_dims = rois_num.dims();
PADDLE_ENFORCE_EQ(
rois_num_dims.size(),
1,
......@@ -1787,7 +1787,7 @@ void RmspropInferMeta(const MetaTensor& param,
const MetaTensor& grad,
const MetaTensor& moment,
const MetaTensor& learning_rate,
paddle::optional<const MetaTensor&> mean_grad,
const MetaTensor& mean_grad,
float epsilon,
float decay,
float momentum,
......@@ -1837,14 +1837,14 @@ void RmspropInferMeta(const MetaTensor& param,
mean_square_out->set_dtype(mean_square.dtype());
if (centered) {
mean_grad_out->set_dims(param_dim);
mean_grad_out->set_dtype(mean_grad.get_ptr()->dtype());
mean_grad_out->set_dtype(mean_grad.dtype());
}
}
void RnnInferMeta(const MetaTensor& x,
const std::vector<const MetaTensor*>& pre_state,
const std::vector<const MetaTensor*>& weight_list,
paddle::optional<const MetaTensor&> sequence_length,
const MetaTensor& sequence_length,
float dropout_prob,
bool is_bidirec,
int input_size,
......@@ -1867,7 +1867,7 @@ void RnnInferMeta(const MetaTensor& x,
in_dims.size()));
if (sequence_length) {
auto seq_dims = sequence_length->dims();
auto seq_dims = sequence_length.dims();
PADDLE_ENFORCE_EQ(
in_dims[1],
seq_dims[0],
......@@ -1929,7 +1929,7 @@ void RnnInferMeta(const MetaTensor& x,
void SgdInferMeta(const MetaTensor& param,
const MetaTensor& learning_rate,
const MetaTensor& grad,
paddle::optional<const MetaTensor&> master_param,
const MetaTensor& master_param,
bool multi_precision,
MetaTensor* param_out,
MetaTensor* master_param_out) {
......@@ -2006,8 +2006,8 @@ void UnchangedMultiInferMeta(const std::vector<const MetaTensor*>& x,
void WarpctcInferMeta(const MetaTensor& logits,
const MetaTensor& label,
const paddle::optional<const MetaTensor&> logits_length,
const paddle::optional<const MetaTensor&> labels_length,
const MetaTensor& logits_length,
const MetaTensor& labels_length,
int blank,
bool norm_by_times,
MetaTensor* warpctc_grad,
......@@ -2015,7 +2015,7 @@ void WarpctcInferMeta(const MetaTensor& logits,
auto logits_dims = logits.dims();
int sequence_width = 0;
if (logits_length.is_initialized()) {
if (logits_length) {
sequence_width = logits_dims[2];
} else {
sequence_width =
......@@ -2069,8 +2069,8 @@ void WhereInferMeta(const MetaTensor& condition,
void GraphReindexInferMeta(const MetaTensor& x,
const MetaTensor& neighbors,
const MetaTensor& count,
paddle::optional<const MetaTensor&> hashtable_value,
paddle::optional<const MetaTensor&> hashtable_index,
const MetaTensor& hashtable_value,
const MetaTensor& hashtable_index,
bool flag_buffer_hashtable,
MetaTensor* reindex_src,
MetaTensor* reindex_dst,
......@@ -2100,8 +2100,8 @@ void GraphReindexInferMeta(const MetaTensor& x,
GraphReindexShapeCheck(neighbors.dims(), "Neighbors");
GraphReindexShapeCheck(count.dims(), "Count");
if (flag_buffer_hashtable) {
GraphReindexShapeCheck(hashtable_value->dims(), "HashTable_Value");
GraphReindexShapeCheck(hashtable_index->dims(), "HashTable_Index");
GraphReindexShapeCheck(hashtable_value.dims(), "HashTable_Value");
GraphReindexShapeCheck(hashtable_index.dims(), "HashTable_Index");
}
reindex_src->set_dims({-1});
......@@ -2112,18 +2112,17 @@ void GraphReindexInferMeta(const MetaTensor& x,
out_nodes->set_dtype(x.dtype());
}
void GraphSampleNeighborsInferMeta(
const MetaTensor& row,
const MetaTensor& col_ptr,
const MetaTensor& x,
paddle::optional<const MetaTensor&> eids,
paddle::optional<const MetaTensor&> perm_buffer,
int sample_size,
bool return_eids,
bool flag_perm_buffer,
MetaTensor* out,
MetaTensor* out_count,
MetaTensor* out_eids) {
void GraphSampleNeighborsInferMeta(const MetaTensor& row,
const MetaTensor& col_ptr,
const MetaTensor& x,
const MetaTensor& eids,
const MetaTensor& perm_buffer,
int sample_size,
bool return_eids,
bool flag_perm_buffer,
MetaTensor* out,
MetaTensor* out_count,
MetaTensor* out_eids) {
// GSN: GraphSampleNeighbors
auto GSNShapeCheck = [](const phi::DDim& dims, std::string tensor_name) {
if (dims.size() == 2) {
......@@ -2149,12 +2148,12 @@ void GraphSampleNeighborsInferMeta(
GSNShapeCheck(col_ptr.dims(), "Col_Ptr");
GSNShapeCheck(x.dims(), "X");
if (return_eids) {
GSNShapeCheck(eids->dims(), "Eids");
GSNShapeCheck(eids.dims(), "Eids");
out_eids->set_dims({-1});
out_eids->set_dtype(row.dtype());
}
if (flag_perm_buffer) {
GSNShapeCheck(perm_buffer->dims(), "Perm_Buffer");
GSNShapeCheck(perm_buffer.dims(), "Perm_Buffer");
}
out->set_dims({-1});
......@@ -2166,7 +2165,7 @@ void GraphSampleNeighborsInferMeta(
void Yolov3LossInferMeta(const MetaTensor& x,
const MetaTensor& gt_box,
const MetaTensor& gt_label,
const paddle::optional<const MetaTensor&> gt_score,
const MetaTensor& gt_score,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
......@@ -2271,8 +2270,8 @@ void Yolov3LossInferMeta(const MetaTensor& x,
"But received class_num(%s) < 0",
class_num));
if (gt_score.get_ptr()) {
auto dim_gtscore = gt_score->dims();
if (gt_score) {
auto dim_gtscore = gt_score.dims();
PADDLE_ENFORCE_EQ(
dim_gtscore.size(),
2,
......
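Editorial note on the pattern above: in the InferMeta layer this commit removes the optional wrapper entirely. An optional input becomes a plain `const MetaTensor&`, where an uninitialized tensor means "not provided", and call sites test it with `if (tensor)` before touching `tensor.dims()`. Below is a minimal self-contained sketch of that convention; the `MetaTensor` stub and its members are illustrative stand-ins, not Paddle's real class.

#include <iostream>

// Illustrative stand-in for phi::MetaTensor: an "absent" optional input is
// modeled as a default-constructed (uninitialized) tensor.
struct MetaTensor {
  bool initialized = false;
  int rank = 0;
  explicit operator bool() const { return initialized; }  // enables `if (scale_tensor)`
  int dims() const { return rank; }                       // mirrors `scale_tensor.dims()`
};

// Mirrors the new signature style: the optional input is a plain const ref.
void InterpolateLikeInferMeta(const MetaTensor& x, const MetaTensor& scale_tensor) {
  if (scale_tensor) {
    std::cout << "scale_tensor rank: " << scale_tensor.dims() << "\n";
  } else {
    std::cout << "scale_tensor not provided\n";
  }
}

int main() {
  MetaTensor x{true, 4};
  InterpolateLikeInferMeta(x, MetaTensor{});         // optional input absent
  InterpolateLikeInferMeta(x, MetaTensor{true, 1});  // optional input present
}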
......@@ -76,8 +76,8 @@ void AdamInferMeta(const MetaTensor& param,
const MetaTensor& moment2,
const MetaTensor& beta1_pow,
const MetaTensor& beta2_pow,
paddle::optional<const MetaTensor&> master_param,
paddle::optional<const MetaTensor&> skip_update,
const MetaTensor& master_param,
const MetaTensor& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -99,8 +99,8 @@ void AdamwInferMeta(const MetaTensor& param,
const MetaTensor& moment2,
const MetaTensor& beta1_pow,
const MetaTensor& beta2_pow,
paddle::optional<const MetaTensor&> master_param,
paddle::optional<const MetaTensor&> skip_update,
const MetaTensor& master_param,
const MetaTensor& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......@@ -170,7 +170,7 @@ void BatchNormInferInferMeta(const MetaTensor& x,
void BilinearTensorProductInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& bias,
MetaTensor* out,
MetaConfig config = MetaConfig());
......@@ -185,7 +185,7 @@ void ConcatInferMeta(const std::vector<const MetaTensor*>& x,
void DeformableConvInferMeta(const MetaTensor& x,
const MetaTensor& offset,
const MetaTensor& filter,
paddle::optional<const MetaTensor&> mask,
const MetaTensor& mask,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
......@@ -198,9 +198,9 @@ void DeformableConvInferMeta(const MetaTensor& x,
void HierarchicalSigmoidInferMeta(const MetaTensor& x,
const MetaTensor& w,
const MetaTensor& label,
paddle::optional<const MetaTensor&> path,
paddle::optional<const MetaTensor&> code,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& path,
const MetaTensor& code,
const MetaTensor& bias,
int num_classes,
bool remote_prefetch,
int trainer_id,
......@@ -214,9 +214,9 @@ void HierarchicalSigmoidInferMeta(const MetaTensor& x,
void InterpolateInferMeta(
const MetaTensor& x,
paddle::optional<const MetaTensor&> out_size,
paddle::optional<const std::vector<const MetaTensor*>> size_tensor,
paddle::optional<const MetaTensor&> scale_tensor,
const MetaTensor& out_size,
const paddle::optional<std::vector<const MetaTensor*>>& size_tensor,
const MetaTensor& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......@@ -241,7 +241,7 @@ void MomentumInferMeta(const MetaTensor& param,
const MetaTensor& grad,
const MetaTensor& velocity,
const MetaTensor& learning_rate,
paddle::optional<const MetaTensor&> master_param,
const MetaTensor& master_param,
float mu,
bool use_nesterov,
const std::string& regularization_method,
......@@ -261,7 +261,7 @@ void MultiplexInferMeta(const std::vector<const MetaTensor*>& ins,
void PsroiPoolInferMeta(const MetaTensor& x,
const MetaTensor& rois,
paddle::optional<const MetaTensor&> rois_num,
const MetaTensor& rois_num,
int pooled_height,
int pooled_width,
int output_channels,
......@@ -273,7 +273,7 @@ void RmspropInferMeta(const MetaTensor& param,
const MetaTensor& grad,
const MetaTensor& moment,
const MetaTensor& learning_rate,
paddle::optional<const MetaTensor&> mean_grad,
const MetaTensor& mean_grad,
float epsilon,
float decay,
float momentum,
......@@ -286,7 +286,7 @@ void RmspropInferMeta(const MetaTensor& param,
void RnnInferMeta(const MetaTensor& x,
const std::vector<const MetaTensor*>& pre_state,
const std::vector<const MetaTensor*>& weight_list,
paddle::optional<const MetaTensor&> sequence_length,
const MetaTensor& sequence_length,
float dropout_prob,
bool is_bidirec,
int input_size,
......@@ -303,7 +303,7 @@ void RnnInferMeta(const MetaTensor& x,
void SgdInferMeta(const MetaTensor& param,
const MetaTensor& learning_rate,
const MetaTensor& grad,
paddle::optional<const MetaTensor&> master_param,
const MetaTensor& master_param,
bool multi_precision,
MetaTensor* param_out,
MetaTensor* master_param_out);
......@@ -317,8 +317,8 @@ void UnchangedMultiInferMeta(const std::vector<const MetaTensor*>& x,
void WarpctcInferMeta(const MetaTensor& logits,
const MetaTensor& label,
const paddle::optional<const MetaTensor&> logits_length,
const paddle::optional<const MetaTensor&> labels_length,
const MetaTensor& logits_length,
const MetaTensor& labels_length,
int blank,
bool norm_by_times,
MetaTensor* warpctc_grad,
......@@ -332,30 +332,29 @@ void WhereInferMeta(const MetaTensor& condition,
void GraphReindexInferMeta(const MetaTensor& x,
const MetaTensor& neighbors,
const MetaTensor& count,
paddle::optional<const MetaTensor&> hashtable_value,
paddle::optional<const MetaTensor&> hashtable_index,
const MetaTensor& hashtable_value,
const MetaTensor& hashtable_index,
bool flag_buffer_hashtable,
MetaTensor* reindex_src,
MetaTensor* reindex_dst,
MetaTensor* out_nodes);
void GraphSampleNeighborsInferMeta(
const MetaTensor& row,
const MetaTensor& col_ptr,
const MetaTensor& x,
paddle::optional<const MetaTensor&> eids,
paddle::optional<const MetaTensor&> perm_buffer,
int sample_size,
bool return_eids,
bool flag_perm_buffer,
MetaTensor* out,
MetaTensor* out_count,
MetaTensor* out_eids);
void GraphSampleNeighborsInferMeta(const MetaTensor& row,
const MetaTensor& col_ptr,
const MetaTensor& x,
const MetaTensor& eids,
const MetaTensor& perm_buffer,
int sample_size,
bool return_eids,
bool flag_perm_buffer,
MetaTensor* out,
MetaTensor* out_count,
MetaTensor* out_eids);
void Yolov3LossInferMeta(const MetaTensor& x,
const MetaTensor& gt_box,
const MetaTensor& gt_label,
const paddle::optional<const MetaTensor&> gt_score,
const MetaTensor& gt_score,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
......
......@@ -192,8 +192,8 @@ void ArangeInferMeta(const MetaTensor& start,
}
void InstanceNormInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> scale,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& scale,
const MetaTensor& bias,
float epsilon,
MetaTensor* y,
MetaTensor* saved_mean,
......@@ -242,9 +242,8 @@ void InstanceNormInferMeta(const MetaTensor& x,
auto N = x_dims[0];
auto C = x_dims[1];
auto NxC = N * C;
const auto scale_ptr = scale.get_ptr();
if (scale_ptr) {
auto scale_dim = scale_ptr->dims();
if (scale) {
auto scale_dim = scale.dims();
PADDLE_ENFORCE_EQ(
scale_dim.size(),
1UL,
......@@ -265,9 +264,8 @@ void InstanceNormInferMeta(const MetaTensor& x,
scale_dim[0]));
}
}
const auto bias_ptr = bias.get_ptr();
if (bias_ptr) {
auto bias_dim = bias_ptr->dims();
if (bias) {
auto bias_dim = bias.dims();
PADDLE_ENFORCE_EQ(
bias_dim.size(),
1UL,
......@@ -365,8 +363,8 @@ void GraphSendRecvInferMeta(const MetaTensor& x,
}
void LayerNormInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> scale,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& scale,
const MetaTensor& bias,
float epsilon,
int begin_norm_axis,
bool is_test,
......@@ -388,19 +386,19 @@ void LayerNormInferMeta(const MetaTensor& x,
auto matrix_dim = phi::flatten_to_2d(x_dim, begin_norm_axis);
int left = static_cast<int>(matrix_dim[0]);
int right = static_cast<int>(matrix_dim[1]);
if (scale.get_ptr() != nullptr) {
PADDLE_ENFORCE_EQ(scale->dims().size(),
if (scale) {
PADDLE_ENFORCE_EQ(scale.dims().size(),
1,
phi::errors::InvalidArgument(
"The dimensions of Input(Scale) must be 1, but "
"received dimensions of"
"Input(Scale) is [%d]",
scale->dims().size()));
scale.dims().size()));
}
if (config.is_runtime && scale.get_ptr() != nullptr) {
if (config.is_runtime && scale) {
PADDLE_ENFORCE_EQ(
scale->dims()[0],
scale.dims()[0],
right,
phi::errors::InvalidArgument(
"The first dimension value of Input(Scale) must equal to be the"
......@@ -408,21 +406,21 @@ void LayerNormInferMeta(const MetaTensor& x,
"But received the first dimension value of Input(Scale) is"
"[%d], the second dimension value of the flattened 2D matrix of"
" Input(Scale) is [%d].",
scale->dims()[0],
scale.dims()[0],
right));
}
if (bias.get_ptr() != nullptr) {
PADDLE_ENFORCE_EQ(bias->dims().size(),
if (bias) {
PADDLE_ENFORCE_EQ(bias.dims().size(),
1,
phi::errors::InvalidArgument(
"The dimensions of Input(Bias) must be 1, but "
"received dimensions of"
"Input(Bias) is [%d]",
bias->dims().size()));
bias.dims().size()));
}
if (config.is_runtime && bias.get_ptr() != nullptr) {
if (config.is_runtime && bias) {
PADDLE_ENFORCE_EQ(
bias->dims()[0],
bias.dims()[0],
right,
phi::errors::InvalidArgument(
"The first dimension value of Input(Bias) must equal to be the"
......@@ -430,7 +428,7 @@ void LayerNormInferMeta(const MetaTensor& x,
"But received the first dimension value of Input(Bias) is"
"[%d], the second dimension value of the flattened 2D matrix of"
" Input(Bias) is [%d].",
bias->dims()[0],
bias.dims()[0],
right));
}
......@@ -445,19 +443,19 @@ void LayerNormInferMeta(const MetaTensor& x,
}
void LayerNormGradInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> y,
paddle::optional<const MetaTensor&> z,
const MetaTensor& y,
const MetaTensor& z,
MetaTensor* dx,
MetaTensor* dy,
MetaTensor* dz) {
if (dx) {
dx->share_meta(x);
}
if (dy && (y.get_ptr() != nullptr)) {
dy->share_meta(*y.get_ptr());
if (dy && y) {
dy->share_meta(y);
}
if (dz && (z.get_ptr() != nullptr)) {
dz->share_meta(*z.get_ptr());
if (dz && z) {
dz->share_meta(z);
}
}
......@@ -517,7 +515,7 @@ void LinspaceInferMeta(const MetaTensor& start,
void NllLossRawInferMeta(const MetaTensor& input,
const MetaTensor& label,
paddle::optional<const MetaTensor&> weight,
const MetaTensor& weight,
int64_t ignore_index,
const std::string& reduction,
MetaTensor* out,
......@@ -542,8 +540,8 @@ void NllLossRawInferMeta(const MetaTensor& input,
" batch_size is [%s].",
x_dims[0],
label_dims[0]));
if (weight.get_ptr() != nullptr) {
auto w_dims = weight->dims();
if (weight) {
auto w_dims = weight.dims();
PADDLE_ENFORCE_EQ(
w_dims.size(),
1,
......@@ -607,7 +605,7 @@ void PutAlongAxisInferMeta(const MetaTensor& x,
void RoiAlignInferMeta(const MetaTensor& x,
const MetaTensor& boxes,
paddle::optional<const MetaTensor&> boxes_num,
const MetaTensor& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
......@@ -619,7 +617,7 @@ void RoiAlignInferMeta(const MetaTensor& x,
auto boxes_dims = boxes.dims();
if (boxes_num) {
auto boxes_num_dims = boxes_num->dims();
auto boxes_num_dims = boxes_num.dims();
PADDLE_ENFORCE_EQ(
boxes_num_dims.size(),
1,
......@@ -684,7 +682,7 @@ void RoiAlignInferMeta(const MetaTensor& x,
void RoiPoolInferMeta(const MetaTensor& x,
const MetaTensor& boxes,
paddle::optional<const MetaTensor&> boxes_num,
const MetaTensor& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
......@@ -694,7 +692,7 @@ void RoiPoolInferMeta(const MetaTensor& x,
auto boxes_dims = boxes.dims();
if (boxes_num) {
auto boxes_num_dims = boxes_num->dims();
auto boxes_num_dims = boxes_num.dims();
PADDLE_ENFORCE_EQ(
boxes_num_dims.size(),
1,
......
......@@ -53,8 +53,8 @@ void ArangeInferMeta(const MetaTensor& start,
MetaTensor* out);
void InstanceNormInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> scale,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& scale,
const MetaTensor& bias,
float epsilon,
MetaTensor* y,
MetaTensor* saved_mean,
......@@ -70,8 +70,8 @@ void GraphSendRecvInferMeta(const MetaTensor& x,
MetaTensor* dst_count);
void LayerNormInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> scale,
paddle::optional<const MetaTensor&> bias,
const MetaTensor& scale,
const MetaTensor& bias,
float epsilon,
int begin_norm_axis,
bool is_test,
......@@ -81,8 +81,8 @@ void LayerNormInferMeta(const MetaTensor& x,
MetaConfig config = MetaConfig());
void LayerNormGradInferMeta(const MetaTensor& x,
paddle::optional<const MetaTensor&> y,
paddle::optional<const MetaTensor&> z,
const MetaTensor& y,
const MetaTensor& z,
MetaTensor* dx,
MetaTensor* dy,
MetaTensor* dz);
......@@ -105,7 +105,7 @@ void LinspaceInferMeta(const MetaTensor& start,
void NllLossRawInferMeta(const MetaTensor& input,
const MetaTensor& label,
paddle::optional<const MetaTensor&> weight,
const MetaTensor& weight,
int64_t ignore_index,
const std::string& reduction,
MetaTensor* out,
......@@ -121,7 +121,7 @@ void PutAlongAxisInferMeta(const MetaTensor& x,
void RoiAlignInferMeta(const MetaTensor& x,
const MetaTensor& boxes,
paddle::optional<const MetaTensor&> boxes_num,
const MetaTensor& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
......@@ -132,7 +132,7 @@ void RoiAlignInferMeta(const MetaTensor& x,
void RoiPoolInferMeta(const MetaTensor& x,
const MetaTensor& boxes,
paddle::optional<const MetaTensor&> boxes_num,
const MetaTensor& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
......
......@@ -137,7 +137,7 @@ void SigmoidTripleGradKernel(const Context& dev_ctx,
const DenseTensor& dout,
const DenseTensor& ddx,
const DenseTensor& d_dout_new,
paddle::optional<const DenseTensor&> d_ddout,
const paddle::optional<DenseTensor>& d_ddout,
DenseTensor* d_out_new,
DenseTensor* d_dout,
DenseTensor* d_ddx);
......
......@@ -28,8 +28,8 @@ void AdamDenseKernel(const Context& dev_ctx,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
paddle::optional<const DenseTensor&> master_param,
paddle::optional<const DenseTensor&> skip_update,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......
......@@ -28,8 +28,8 @@ void AdamwDenseKernel(const Context& dev_ctx,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
paddle::optional<const DenseTensor&> master_param,
paddle::optional<const DenseTensor&> skip_update,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......
......@@ -31,7 +31,7 @@ void AssignKernel(const Context& dev_ctx,
template <typename Context>
void AssignRawKernel(const Context& dev_ctx,
paddle::optional<const DenseTensor&> x,
const paddle::optional<DenseTensor>& x,
DenseTensor* out) {
if (x) {
if (!x->IsInitialized()) {
......
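In the kernel layer, by contrast, the wrapper stays but now holds a value type: `paddle::optional<const DenseTensor&>` becomes `const paddle::optional<DenseTensor>&`. Since `paddle::optional` supports `operator bool`, `operator*`, `operator->`, and `get_ptr()`, call sites such as the `if (x) { if (!x->IsInitialized()) ... }` above keep compiling. A rough stand-in sketch with `std::optional` (which lacks `get_ptr()`, so only the operator forms are shown):

#include <iostream>
#include <optional>

struct DenseTensor {  // illustrative stub, not phi::DenseTensor
  bool init = false;
  bool IsInitialized() const { return init; }
};

// Mirrors AssignRawKernel: the optional input is passed by const reference.
void AssignRawLike(const std::optional<DenseTensor>& x, DenseTensor* out) {
  if (x) {                      // presence check, as in `if (x)`
    if (!x->IsInitialized()) {  // member access via operator->
      return;                   // uninitialized input: nothing to assign
    }
    *out = *x;                  // value access via operator*
  }
}

int main() {
  DenseTensor out;
  AssignRawLike(std::nullopt, &out);       // absent input: no-op
  AssignRawLike(DenseTensor{true}, &out);  // present input: copied through
  std::cout << out.IsInitialized() << "\n";
}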
......@@ -31,7 +31,7 @@ void AssignKernel(const Context& dev_ctx,
// this looks weird
template <typename Context>
void AssignRawKernel(const Context& dev_ctx,
paddle::optional<const DenseTensor&> x,
const paddle::optional<DenseTensor>& x,
DenseTensor* out);
template <typename Context>
......
......@@ -24,11 +24,11 @@ void BatchNormGradRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
const DenseTensor& bias,
paddle::optional<const DenseTensor&> mean,
paddle::optional<const DenseTensor&> variance,
const paddle::optional<DenseTensor>& mean,
const paddle::optional<DenseTensor>& variance,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
paddle::optional<const DenseTensor&> reserve_space,
const paddle::optional<DenseTensor>& reserve_space,
const DenseTensor& y_grad,
float momentum,
float epsilon,
......@@ -47,11 +47,11 @@ void BatchNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
const DenseTensor& bias,
paddle::optional<const DenseTensor&> mean,
paddle::optional<const DenseTensor&> variance,
const paddle::optional<DenseTensor>& mean,
const paddle::optional<DenseTensor>& variance,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
paddle::optional<const DenseTensor&> reserve_space,
const paddle::optional<DenseTensor>& reserve_space,
const DenseTensor& y_grad,
float momentum,
float epsilon,
......@@ -68,8 +68,8 @@ template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
paddle::optional<const DenseTensor&> mean,
paddle::optional<const DenseTensor&> variance,
const paddle::optional<DenseTensor>& mean,
const paddle::optional<DenseTensor>& variance,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
const DenseTensor& y_grad,
......
......@@ -24,7 +24,7 @@ void BilinearTensorProductKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& weight,
paddle::optional<const DenseTensor&> bias,
const paddle::optional<DenseTensor>& bias,
DenseTensor* out);
} // namespace phi
......@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context>
void BincountKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<const DenseTensor&> weights,
const paddle::optional<DenseTensor>& weights,
int minlength,
DenseTensor* out);
......
......@@ -23,8 +23,8 @@ void ConvGradGradKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& paddding_algorithm,
......@@ -40,8 +40,8 @@ void ConvGradGradKernel(const Context& dev_ctx,
template <typename T, typename Context>
void Conv3DGradGradKernel(const Context& dev_ctx,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
......
......@@ -36,8 +36,8 @@ void AdamDenseKernel(const Context& dev_ctx,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
paddle::optional<const DenseTensor&> master_param,
paddle::optional<const DenseTensor&> skip_update,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......
......@@ -35,8 +35,8 @@ void AdamwDenseKernel(const Context& dev_ctx,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
paddle::optional<const DenseTensor&> master_param,
paddle::optional<const DenseTensor&> skip_update,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......
......@@ -41,11 +41,11 @@ void BatchNormGradRawKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& scale,
const DenseTensor& bias,
paddle::optional<const DenseTensor&> mean,
paddle::optional<const DenseTensor&> variance,
const paddle::optional<DenseTensor>& mean,
const paddle::optional<DenseTensor>& variance,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
paddle::optional<const DenseTensor&> reserve_space,
const paddle::optional<DenseTensor>& reserve_space,
const DenseTensor& y_grad,
float momentum,
float epsilon,
......@@ -300,11 +300,11 @@ void BatchNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
const DenseTensor& bias,
paddle::optional<const DenseTensor&> mean,
paddle::optional<const DenseTensor&> variance,
const paddle::optional<DenseTensor>& mean,
const paddle::optional<DenseTensor>& variance,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
paddle::optional<const DenseTensor&> reserve_space,
const paddle::optional<DenseTensor>& reserve_space,
const DenseTensor& y_grad,
float momentum,
float epsilon,
......@@ -343,8 +343,8 @@ template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& scale,
paddle::optional<const DenseTensor&> mean,
paddle::optional<const DenseTensor&> variance,
const paddle::optional<DenseTensor>& mean,
const paddle::optional<DenseTensor>& variance,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
const DenseTensor& y_grad,
......
......@@ -23,7 +23,7 @@ namespace phi {
template <typename Context, typename T, typename InputT>
void BincountInner(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<const DenseTensor&> weights,
const paddle::optional<DenseTensor>& weights,
int minlength,
DenseTensor* out) {
const DenseTensor* input = &x;
......@@ -85,7 +85,7 @@ void BincountInner(const Context& dev_ctx,
template <typename T, typename Context>
void BincountKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<const DenseTensor&> weights,
const paddle::optional<DenseTensor>& weights,
int minlength,
DenseTensor* out) {
if (x.dtype() == DataType::INT32) {
......
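On the calling side the construction changes to match: instead of `paddle::make_optional<const DenseTensor&>(t)`, callers default-construct an empty `paddle::optional<DenseTensor>` and assign into it only when the input is live, the same shape as the generated eager-API code earlier in this commit. A minimal stand-in sketch, again using `std::optional`:

#include <optional>

struct Tensor {  // illustrative stub
  bool live = false;
  bool initialized() const { return live; }
};

// Stand-in consumer that branches on presence, as BincountInner does.
int bincount_like(const Tensor& x, const std::optional<Tensor>& weights) {
  (void)x;  // shape/dtype checks omitted in this sketch
  return weights ? 2 : 1;
}

int call_site(const Tensor& x, const Tensor& maybe_weights) {
  std::optional<Tensor> weights_optional;  // empty by default
  if (maybe_weights.initialized()) weights_optional = maybe_weights;
  return bincount_like(x, weights_optional);
}

int main() {
  Tensor x{true};
  return call_site(x, Tensor{}) == 1 ? 0 : 1;  // absent weights -> branch 1
}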
......@@ -21,8 +21,8 @@
namespace phi {
template <typename T, typename Context>
void Conv3DGradGradKernel(const Context& ctx,
paddle::optional<const DenseTensor&> input_grad_grad,
paddle::optional<const DenseTensor&> filter_grad_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const DenseTensor& out_grad,
const DenseTensor& input,
const DenseTensor& filter,
......
......@@ -23,7 +23,7 @@ namespace phi {
template <typename T, typename Context>
void DropoutRawKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> seed_tensor,
const paddle::optional<DenseTensor>& seed_tensor,
float p,
bool is_test,
const std::string& mode,
......
......@@ -63,8 +63,8 @@ template <typename T, typename Context>
void AddDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* ddout) {
phi::AddDoubleGradImpl<T>(dev_ctx, y, ddx, ddy, dout, axis, ddout);
......
......@@ -39,8 +39,8 @@ template <typename T, typename Context>
void SubtractDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* ddout) {
phi::SubtractDoubleGradImpl<T>(dev_ctx, y, ddx, ddy, dout, axis, ddout);
......
......@@ -27,8 +27,8 @@ void GraphReindexKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& neighbors,
const DenseTensor& count,
paddle::optional<const DenseTensor&> hashtable_value,
paddle::optional<const DenseTensor&> hashtable_index,
const paddle::optional<DenseTensor>& hashtable_value,
const paddle::optional<DenseTensor>& hashtable_index,
bool flag_buffer_hashtable,
DenseTensor* reindex_src,
DenseTensor* reindex_dst,
......
......@@ -167,8 +167,8 @@ void GraphSampleNeighborsKernel(
const DenseTensor& row,
const DenseTensor& col_ptr,
const DenseTensor& x,
paddle::optional<const DenseTensor&> eids,
paddle::optional<const DenseTensor&> perm_buffer,
const paddle::optional<DenseTensor>& eids,
const paddle::optional<DenseTensor>& perm_buffer,
int sample_size,
bool return_eids,
bool flag_perm_buffer,
......
......@@ -121,8 +121,8 @@ void GraphSendRecvGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
paddle::optional<const DenseTensor&> out,
paddle::optional<const DenseTensor&> dst_count,
const paddle::optional<DenseTensor>& out,
const paddle::optional<DenseTensor>& dst_count,
const DenseTensor& out_grad,
const std::string& pool_type,
DenseTensor* x_grad) {
......
......@@ -31,9 +31,9 @@ void HierarchicalSigmoidGradKernelImpl(
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
......
......@@ -25,9 +25,9 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
......
......@@ -32,9 +32,9 @@ void HierarchicalSigmoidKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias,
int num_classes,
bool remote_prefetch,
int trainer_id,
......
......@@ -43,7 +43,7 @@ template <typename T, typename Context>
void InstanceNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& d_y,
paddle::optional<const DenseTensor&> scale,
const paddle::optional<DenseTensor>& scale,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
float epsilon,
......@@ -153,13 +153,13 @@ void InstanceNormGradKernel(const Context& dev_ctx,
template <typename T, typename Context>
void InstanceNormDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> scale,
const paddle::optional<DenseTensor>& scale,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
const DenseTensor& dy,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddscale,
paddle::optional<const DenseTensor&> ddbias,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddscale,
const paddle::optional<DenseTensor>& ddbias,
float epsilon,
DenseTensor* dx,
DenseTensor* dscale,
......
......@@ -30,8 +30,8 @@ namespace phi {
template <typename T, typename Context>
void InstanceNormKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> scale,
paddle::optional<const DenseTensor&> bias,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon_f,
DenseTensor* y,
DenseTensor* saved_mean,
......
......@@ -361,9 +361,9 @@ template <typename T, typename Context>
static void Interpolate1DCPUBwd(
const Context& dev_ctx,
const DenseTensor& input,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& output_grad,
const std::string& data_layout_str,
int out_w,
......@@ -459,9 +459,9 @@ template <typename T, typename Context>
static void Interpolate2DCPUBwd(
const Context& dev_ctx,
const DenseTensor& input,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& output_grad,
const std::string& data_layout_str,
int out_h,
......@@ -619,9 +619,9 @@ template <typename T, typename Context>
static void Interpolate3DCPUBwd(
const Context& dev_ctx,
const DenseTensor& input,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& output_grad,
const std::string& data_layout_str,
int out_d,
......@@ -800,9 +800,9 @@ template <typename T, typename Context>
void InterpolateGradKernel(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& output_grad,
const std::string& data_layout,
int out_d,
......@@ -867,9 +867,9 @@ template <typename T, typename Context>
void BilinearInterpGradKernel(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& out_grad,
const std::string& data_layout,
int out_d,
......@@ -901,9 +901,9 @@ template <typename T, typename Context>
void NearestInterpGradKernel(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& out_grad,
const std::string& data_layout,
int out_d,
......@@ -935,9 +935,9 @@ template <typename T, typename Context>
void TrilinearInterpGradKernel(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& out_grad,
const std::string& data_layout,
int out_d,
......@@ -969,9 +969,9 @@ template <typename T, typename Context>
void LinearInterpGradKernel(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& out_grad,
const std::string& data_layout,
int out_d,
......@@ -1003,9 +1003,9 @@ template <typename T, typename Context>
void BicubicInterpGradKernel(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const DenseTensor& out_grad,
const std::string& data_layout,
int out_d,
......
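The interpolate kernels above also carry an optional list input, `const paddle::optional<std::vector<const DenseTensor*>>&`. The same access rules apply, with the vector reached through the optional before it is iterated. A hedged stand-in sketch:

#include <iostream>
#include <optional>
#include <vector>

struct DenseTensor { int value = 0; };  // illustrative stub

// Mirrors the `size_tensor` parameter: an optional vector of tensor pointers.
int count_size_entries(
    const std::optional<std::vector<const DenseTensor*>>& size_tensor) {
  if (size_tensor && !size_tensor->empty()) {
    return static_cast<int>(size_tensor->size());  // explicit size list supplied
  }
  return 0;  // fall back to the out_d/out_h/out_w attributes
}

int main() {
  DenseTensor d0, d1;
  std::vector<const DenseTensor*> sizes{&d0, &d1};
  std::cout << count_size_entries(sizes) << "\n";         // prints 2
  std::cout << count_size_entries(std::nullopt) << "\n";  // prints 0
}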
......@@ -504,9 +504,9 @@ template <typename T, typename Context>
static void Interpolate1DCPUFwd(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_w,
const std::vector<float>& scale,
......@@ -603,9 +603,9 @@ template <typename T, typename Context>
static void Interpolate2DCPUFwd(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_h,
int out_w,
......@@ -770,9 +770,9 @@ template <typename T, typename Context>
static void Interpolate3DCPUFwd(
const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
......@@ -966,9 +966,9 @@ template <typename T, typename Context>
void InterpolateKernel(
const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......@@ -1029,9 +1029,9 @@ template <typename T, typename Context>
void BilinearInterpKernel(
const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......@@ -1061,9 +1061,9 @@ template <typename T, typename Context>
void NearestInterpKernel(
const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......@@ -1093,9 +1093,9 @@ template <typename T, typename Context>
void TrilinearInterpKernel(
const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......@@ -1125,9 +1125,9 @@ template <typename T, typename Context>
void LinearInterpKernel(
const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......@@ -1157,9 +1157,9 @@ template <typename T, typename Context>
void BicubicInterpKernel(
const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> out_size,
paddle::optional<const std::vector<const DenseTensor*>> size_tensor,
paddle::optional<const DenseTensor&> scale_tensor,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
......
......@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void LabelSmoothKernel(const Context& ctx,
const DenseTensor& label,
paddle::optional<const DenseTensor&> prior_dist,
const paddle::optional<DenseTensor>& prior_dist,
float epsilon,
DenseTensor* out) {
auto label_dim = label.dims()[label.dims().size() - 1];
......
......@@ -32,8 +32,8 @@ namespace phi {
template <typename T, typename Context>
void LayerNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> scale_opt,
paddle::optional<const DenseTensor&> bias_opt,
const paddle::optional<DenseTensor>& scale_opt,
const paddle::optional<DenseTensor>& bias_opt,
const DenseTensor& mean,
const DenseTensor& variance,
const DenseTensor& out_grad,
......
......@@ -30,8 +30,8 @@ namespace phi {
template <typename T, typename Context>
void LayerNormKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> scale_opt,
paddle::optional<const DenseTensor&> bias_opt,
const paddle::optional<DenseTensor>& scale_opt,
const paddle::optional<DenseTensor>& bias_opt,
float epsilon,
int begin_norm_axis,
bool is_test,
......
......@@ -121,7 +121,7 @@ template <typename T, typename Context>
void NllLossGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& labels,
paddle::optional<const DenseTensor&> weight,
const paddle::optional<DenseTensor>& weight,
const DenseTensor& total_weight,
const DenseTensor& d_out,
int64_t ignore_index,
......
......@@ -154,7 +154,7 @@ template <typename T, typename Context>
void NllLossRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& labels,
paddle::optional<const DenseTensor&> weight,
const paddle::optional<DenseTensor>& weight,
int64_t ignore_index,
const std::string& reduction,
DenseTensor* out,
......
......@@ -24,7 +24,7 @@ template <typename T, typename Context>
void PsroiPoolGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& rois,
paddle::optional<const DenseTensor&> rois_num,
const paddle::optional<DenseTensor>& rois_num,
const DenseTensor& dout,
int pooled_height,
int pooled_width,
......
......@@ -23,7 +23,7 @@ template <typename T, typename Context>
void PsroiPoolKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& rois,
paddle::optional<const DenseTensor&> rois_num,
const paddle::optional<DenseTensor>& rois_num,
int pooled_height,
int pooled_width,
int output_channels,
......
......@@ -969,7 +969,7 @@ void RnnGradFunc(const CPUContext& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& pre_state,
const std::vector<const DenseTensor*>& weight_list,
paddle::optional<const DenseTensor&> sequence_length,
const paddle::optional<DenseTensor>& sequence_length,
const DenseTensor& out,
const DenseTensor& dropout_state,
const DenseTensor& reserve,
......@@ -1244,7 +1244,7 @@ void RnnGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& pre_state,
const std::vector<const DenseTensor*>& weight_list,
paddle::optional<const DenseTensor&> sequence_length,
const paddle::optional<DenseTensor>& sequence_length,
const DenseTensor& out,
const DenseTensor& dropout_state,
const DenseTensor& reserve,
......
......@@ -819,7 +819,7 @@ void RnnKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& pre_state,
const std::vector<const DenseTensor*>& weight_list,
paddle::optional<const DenseTensor&> sequence_length,
const paddle::optional<DenseTensor>& sequence_length,
float dropout_prob,
bool is_bidirec,
int input_size,
......
......@@ -73,7 +73,7 @@ template <typename T, typename Context>
void RoiAlignGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& boxes,
paddle::optional<const DenseTensor&> boxes_num,
const paddle::optional<DenseTensor>& boxes_num,
const DenseTensor& out_grad,
int pooled_height,
int pooled_width,
......
......@@ -182,7 +182,7 @@ template <typename T, typename Context>
void RoiAlignKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& boxes,
paddle::optional<const DenseTensor&> boxes_num,
const paddle::optional<DenseTensor>& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
......
......@@ -25,7 +25,7 @@ template <typename T, typename Context>
void RoiPoolGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& boxes,
paddle::optional<const DenseTensor&> boxes_num,
const paddle::optional<DenseTensor>& boxes_num,
const DenseTensor& arg_max,
const DenseTensor& out_grad,
int pooled_height,
......
......@@ -24,7 +24,7 @@ template <typename T, typename Context>
void RoiPoolKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& boxes,
paddle::optional<const DenseTensor&> boxes_num,
const paddle::optional<DenseTensor>& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
......
......@@ -118,7 +118,7 @@ void SGDDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const DenseTensor& grad,
paddle::optional<const DenseTensor&> master_param,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
DenseTensor* param_out,
DenseTensor* master_param_out) {
......@@ -132,7 +132,7 @@ void SGDDenseParamSparseGradKernel(
const DenseTensor& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
paddle::optional<const DenseTensor&> master_param,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
DenseTensor* param_out,
DenseTensor* master_param_out) {
......@@ -146,7 +146,7 @@ void SGDSparseParamSparseGradKernel(
const SelectedRows& param,
const DenseTensor& learning_rate,
const SelectedRows& grad,
paddle::optional<const SelectedRows&> master_param,
const paddle::optional<SelectedRows>& master_param,
bool multi_precision,
SelectedRows* param_out,
SelectedRows* master_param_out) {
......
......@@ -121,7 +121,7 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
paddle::optional<const DenseTensor&> gt_score,
const paddle::optional<DenseTensor>& gt_score,
const DenseTensor& loss_grad,
const DenseTensor& objectness_mask,
const DenseTensor& gt_match_mask,
......
......@@ -182,7 +182,7 @@ void Yolov3LossKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
paddle::optional<const DenseTensor&> gt_score,
const paddle::optional<DenseTensor>& gt_score,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
......
......@@ -23,7 +23,7 @@ void DeformableConvGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& offset,
const DenseTensor& filter,
paddle::optional<const DenseTensor&> mask,
const paddle::optional<DenseTensor>& mask,
const DenseTensor& out_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
......
......@@ -24,7 +24,7 @@ void DeformableConvKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& offset,
const DenseTensor& filter,
paddle::optional<const DenseTensor&> mask,
const paddle::optional<DenseTensor>& mask,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
......
......@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void DropoutRawKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> seed_tensor,
const paddle::optional<DenseTensor>& seed_tensor,
float p,
bool is_test,
const std::string& mode,
......
......@@ -32,8 +32,8 @@ template <typename T, typename Context>
void AddDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* ddout);
......
......@@ -34,8 +34,8 @@ void DivideDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& out,
const DenseTensor& dx,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* dy,
DenseTensor* dout,
......
......@@ -33,8 +33,8 @@ void MultiplyDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* dx,
DenseTensor* dy,
......@@ -45,11 +45,11 @@ void MultiplyTripleGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
const DenseTensor& d_dx,
const DenseTensor& d_dy,
paddle::optional<const DenseTensor&> d_ddout,
const paddle::optional<DenseTensor>& d_ddout,
int axis,
DenseTensor* d_x,
DenseTensor* d_y,
......
......@@ -31,8 +31,8 @@ template <typename T, typename Context>
void SubtractDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* ddout);
......
......@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context>
void ExpandAsKernel(const Context& ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> y,
const paddle::optional<DenseTensor>& y,
const std::vector<int>& target_shape,
DenseTensor* out);
......
......@@ -170,7 +170,7 @@ template <typename T, typename PoolProcess>
__global__ void KernelPool2DGrad(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ output_data,
const const T* __restrict__ output_grad,
const T* __restrict__ output_grad,
const int output_width,
const int output_height,
const int input_width,
......
......@@ -90,7 +90,7 @@ class SegmentPoolGradFunctor<phi::CPUContext, T, IndexT> {
const DenseTensor& out_grad,
const DenseTensor& segments,
DenseTensor* in_grad,
paddle::optional<const DenseTensor&> index,
const paddle::optional<DenseTensor>& index,
const std::string pooltype = "SUM") {
const IndexT* segment_ids = segments.data<IndexT>();
auto& place = *dev_ctx.eigen_device();
......
......@@ -417,7 +417,7 @@ class SegmentPoolGradFunctor<phi::GPUContext, T, IndexT> {
const DenseTensor& out_grad,
const DenseTensor& segments,
DenseTensor* in_grad,
paddle::optional<const DenseTensor&> summed_ids,
const paddle::optional<DenseTensor>& summed_ids,
const std::string pooltype = "SUM") {
if (pooltype == "MAX" || pooltype == "MIN") {
SegmentPoolCUDAGradFunctor<T, IndexT>(
......
......@@ -41,7 +41,7 @@ class SegmentPoolGradFunctor {
const DenseTensor& out_grad,
const DenseTensor& segments,
DenseTensor* in_grad,
paddle::optional<const DenseTensor&> summed_ids,
const paddle::optional<DenseTensor>& summed_ids,
const std::string pooltype = "SUM");
};
......
......@@ -135,8 +135,8 @@ void AdamDenseKernel(const Context& dev_ctx,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
paddle::optional<const DenseTensor&> master_param,
paddle::optional<const DenseTensor&> skip_update,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......
......@@ -146,8 +146,8 @@ void AdamwDenseKernel(const Context& dev_ctx,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
paddle::optional<const DenseTensor&> master_param,
paddle::optional<const DenseTensor&> skip_update,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
......
......@@ -309,11 +309,11 @@ void BatchNormGradRawKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> reserve_space,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon_f,
......@@ -867,11 +867,11 @@ void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
paddle::optional<const DenseTensor &> reserve_space,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon,
......@@ -910,8 +910,8 @@ template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
paddle::optional<const DenseTensor &> mean,
paddle::optional<const DenseTensor &> variance,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const DenseTensor &y_grad,
......
......@@ -49,7 +49,7 @@ __global__ void KernelBincount(const InputT* input,
template <typename Context, typename T, typename InputT>
void BincountCUDAInner(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<const DenseTensor&> weights,
const paddle::optional<DenseTensor>& weights,
int minlength,
DenseTensor* out) {
const DenseTensor* input = &x;
......@@ -143,7 +143,7 @@ void BincountCUDAInner(const Context& dev_ctx,
template <typename T, typename Context>
void BincountKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<const DenseTensor&> weights,
const paddle::optional<DenseTensor>& weights,
int minlength,
DenseTensor* out) {
if (x.dtype() == DataType::INT32) {
......
......@@ -23,7 +23,7 @@ namespace phi {
template <typename T, typename Context>
void DropoutRawKernel(const Context& dev_ctx,
const DenseTensor& x,
paddle::optional<const DenseTensor&> seed_tensor,
const paddle::optional<DenseTensor>& seed_tensor,
float p,
bool is_test,
const std::string& mode,
......
......@@ -57,8 +57,8 @@ template <typename T, typename Context>
void AddDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* ddout) {
phi::AddDoubleGradImpl<T>(dev_ctx, y, ddx, ddy, dout, axis, ddout);
......
......@@ -47,8 +47,8 @@ template <typename T, typename Context>
void SubtractDoubleGradKernel(const Context& dev_ctx,
const DenseTensor& y,
const DenseTensor& dout,
paddle::optional<const DenseTensor&> ddx,
paddle::optional<const DenseTensor&> ddy,
const paddle::optional<DenseTensor>& ddx,
const paddle::optional<DenseTensor>& ddy,
int axis,
DenseTensor* ddout) {
phi::SubtractDoubleGradImpl<T>(dev_ctx, y, ddx, ddy, dout, axis, ddout);
......
......@@ -286,8 +286,8 @@ void GraphReindexKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& neighbors,
const DenseTensor& count,
paddle::optional<const DenseTensor&> hashtable_value,
paddle::optional<const DenseTensor&> hashtable_index,
const paddle::optional<DenseTensor>& hashtable_value,
const paddle::optional<DenseTensor>& hashtable_index,
bool flag_buffer_hashtable,
DenseTensor* reindex_src,
DenseTensor* reindex_dst,
......
......@@ -356,8 +356,8 @@ void GraphSampleNeighborsKernel(
const DenseTensor& row,
const DenseTensor& col_ptr,
const DenseTensor& x,
paddle::optional<const DenseTensor&> eids,
paddle::optional<const DenseTensor&> perm_buffer,
const paddle::optional<DenseTensor>& eids,
const paddle::optional<DenseTensor>& perm_buffer,
int sample_size,
bool return_eids,
bool flag_perm_buffer,
......
......@@ -105,8 +105,8 @@ void GraphSendRecvGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
paddle::optional<const DenseTensor&> out,
paddle::optional<const DenseTensor&> dst_count,
const paddle::optional<DenseTensor>& out,
const paddle::optional<DenseTensor>& dst_count,
const DenseTensor& out_grad,
const std::string& pool_type,
DenseTensor* x_grad) {
......
[4 additional file diffs collapsed and not shown]