Unverified commit a0a6dc6a, authored by HappyHeavyRain, committed by GitHub

[New features] Add function node in phi_kernel for MKLDNN (#51073)

* Add function node in phi_kernel for MKLDNN

* fix the bug in 'BuildInferVarKernelContext'

* add infer_varkernel_utils.cc

* fix the bug: the first two parameters of 'BuildInferVarKernelContext' can't be template variables

* change the code according to the first review

* change the code according to the first review

* change the mode of paddle_build.sh

* change 'infer_var_kernel_fn_' to 'get_kerneltype_forvar_fn_'

* add the error information

* fix NotFound information warning

* fix NotFound information warning

* fix NotFound information warning
Parent bfb79ee2
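The mechanism in one picture: phi::Kernel gains a function pointer, get_kerneltype_forvar_fn_, which a kernel can install at registration time; when it is set, the executor consults it instead of the operator's default GetKernelTypeForVar. A minimal sketch distilled from the diff below (the names match the patch; the surrounding scaffolding is elided):

    // Registration time (kernel side): opt in by installing the hook.
    kernel->get_kerneltype_forvar_fn_ = phi::InterpolateGetKernelTypeForVar;

    // Execution time (framework side): if the hook is present, build a
    // phi::GetKernelTypeForVarContext and let the kernel pick the KernelKey.
    if (phi_kernel->get_kerneltype_forvar_fn_ != nullptr) {
      kernel_key_for_var =
          phi_kernel->get_kerneltype_forvar_fn_(&infer_varkernel_context);
    }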
@@ -479,7 +479,8 @@ if(WITH_XPU)
       kernel_factory
       infershape_utils
       op_utils
-      op_compat_infos)
+      op_compat_infos
+      get_kerneltype_forvar_utils)
 else()
   cc_library(
     operator
@@ -505,7 +506,8 @@ else()
       kernel_factory
       infershape_utils
       op_utils
-      op_compat_infos)
+      op_compat_infos
+      get_kerneltype_forvar_utils)
 endif()
 cc_test(
......
@@ -156,5 +156,31 @@ void SetTensorToVariable(const Variable &in_var,
   }
 }
 
+phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext(
+    const phi::KernelKey &kernel_key,
+    const AttributeMap &fluid_attrs,
+    phi::AttributeMap *phi_attrs,
+    bool has_infer_varkernel_fn) {
+  // In the 'GetKernelTypeForVar' of ops that have MKLDNN code, only the
+  // "string" members of the AttributeMap, such as "data_layout" and
+  // "data_format", are used; the other attributes may be needed in the
+  // future. Because the "phi" module must not depend on "fluid", transform
+  // "framework::AttributeMap" to "phi::AttributeMap".
+  if (has_infer_varkernel_fn) {
+    for (auto &attr : fluid_attrs) {
+      switch (attr.second.index()) {
+        case 3:  // string type in framework::Attribute
+          (*phi_attrs)[attr.first] =
+              PADDLE_GET_CONST(std::string, attr.second);
+          break;
+        default:
+          VLOG(6) << "GetKernelTypeForVarContext currently only uses "
+                     "std::string; add other types if needed.";
+          break;
+      }
+    }
+  }
+  return phi::GetKernelTypeForVarContext(&kernel_key, phi_attrs);
+}
+
 }  // namespace framework
 }  // namespace paddle
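To make the string-only filtering above concrete, here is a hedged sketch of what a caller observes (the attribute names are illustrative, and a phi::KernelKey named kernel_key is assumed to be in scope):

    framework::AttributeMap fluid_attrs;
    fluid_attrs["data_layout"] = std::string("NHWC");  // string: copied over
    fluid_attrs["align_corners"] = true;               // non-string: skipped

    phi::AttributeMap phi_attrs;
    auto ctx = paddle::framework::BuildGetKernelTypeForVarContext(
        kernel_key, fluid_attrs, &phi_attrs, /*has_infer_varkernel_fn=*/true);
    // phi_attrs now contains only {"data_layout": "NHWC"}.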
@@ -25,6 +25,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/macros.h"
 #include "paddle/phi/common/transform.h"
+#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
 namespace paddle {
@@ -45,5 +46,12 @@ void TransformData(const phi::KernelKey &expected_kernel_type,
 void SetTensorToVariable(const Variable &in_var,
                          const phi::DenseTensor &tensor,
                          Variable *out_var);
+
+phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext(
+    const phi::KernelKey &kernel_key,
+    const AttributeMap &fluid_attrs,
+    phi::AttributeMap *phi_attrs,
+    bool has_infer_varkernel_fn);
+
 }  // namespace framework
 }  // namespace paddle
@@ -15,6 +15,7 @@
 #include "paddle/fluid/framework/new_executor/interpreter/data_transfer.h"
 
 #include "paddle/fluid/framework/convert_utils.h"
+#include "paddle/fluid/framework/data_transform.h"
 #include "paddle/fluid/framework/new_executor/interpreter/interpreter_util.h"
 #include "paddle/phi/core/kernel_context.h"
 #include "paddle/phi/core/kernel_factory.h"
@@ -474,7 +475,17 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key,
   bool transfered = false;
   DataTranferHelper data_transfer_helper(place, var_scope, local_scope);
 
+  phi::Kernel* phi_kernel = op_func_node->phi_kernel_;
+  auto has_infer_varkernel_fn =
+      (phi_kernel && phi_kernel->get_kerneltype_forvar_fn_ != nullptr);
+  phi::AttributeMap infer_attrs{};
+  auto fluid_attrs =
+      static_cast<const framework::OperatorWithKernel*>(op_base)->Attrs();
+  auto phi_kernelkey =
+      framework::TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
+  phi::GetKernelTypeForVarContext infer_varkernel_context =
+      BuildGetKernelTypeForVarContext(
+          phi_kernelkey, fluid_attrs, &infer_attrs, has_infer_varkernel_fn);
+
   auto apply_data_transform_for_one_parameter =
       [&](const std::string& parameter_name,
           const std::vector<std::string>& argument_names,
@@ -551,11 +562,15 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key,
             auto kernel_key_for_var =
                 static_cast<const framework::OperatorWithKernel*>(op_base)
                     ->GetKernelTypeForVar(
-                        parameter_name,
-                        *tensor_in,
-                        framework::TransOpKernelTypeToPhiKernelKey(
-                            expected_kernel_key));
+                        parameter_name, *tensor_in, phi_kernelkey);
+            if (has_infer_varkernel_fn) {
+              infer_varkernel_context.SetVarName(
+                  const_cast<std::string*>(&parameter_name));
+              infer_varkernel_context.SetDenseTensor(
+                  const_cast<phi::DenseTensor*>(tensor_in));
+              kernel_key_for_var = phi_kernel->get_kerneltype_forvar_fn_(
+                  &infer_varkernel_context);
+            }
             std::unique_ptr<phi::KernelKey>
                 expected_kernel_key_for_argument_def = nullptr;
             if (argument_def &&
@@ -634,7 +649,6 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key,
     }
   };
 
-  phi::Kernel* phi_kernel = op_func_node->phi_kernel_;
   if (phi_kernel && phi_kernel->IsValid() &&
       phi_kernel->GetKernelRegisteredType() ==
           phi::KernelRegisteredType::FUNCTION) {
......
@@ -37,6 +37,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/profiler/supplement_tracing.h"
 #include "paddle/phi/common/int_array.h"
 #include "paddle/phi/common/scalar.h"
+#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
 #include "paddle/phi/core/ddim.h"
 #include "paddle/phi/core/kernel_context.h"
 #include "paddle/phi/core/kernel_factory.h"
@@ -2448,6 +2449,16 @@ Scope* OperatorWithKernel::PrepareData(
     }
   }
 
+  auto has_infer_varkernel_fn =
+      (run_phi_kernel_ && phi_kernel_->get_kerneltype_forvar_fn_ != nullptr);
+  phi::AttributeMap infer_attrs{};
+  auto fluid_attrs = Attrs();
+  phi::GetKernelTypeForVarContext infer_varkernel_context =
+      BuildGetKernelTypeForVarContext(expected_kernel_key,
+                                      fluid_attrs,
+                                      &infer_attrs,
+                                      has_infer_varkernel_fn);
+
   const auto& name_map = Inputs();
   auto prepare_input_data = [&](const std::string& in_name,
                                 std::vector<Variable*>* in_vars,
@@ -2510,6 +2521,13 @@ Scope* OperatorWithKernel::PrepareData(
       auto kernel_type_for_var =
           GetKernelTypeForVar(in_name, *tensor_in, expected_kernel_key);
+      if (has_infer_varkernel_fn) {
+        infer_varkernel_context.SetVarName(const_cast<std::string*>(&in_name));
+        infer_varkernel_context.SetDenseTensor(
+            const_cast<phi::DenseTensor*>(tensor_in));
+        kernel_type_for_var =
+            phi_kernel_->get_kerneltype_forvar_fn_(&infer_varkernel_context);
+      }
       bool need_trans_dtype =
           NeedTransformDataType(expected_kernel_key, kernel_type_for_var);
       bool need_trans_layout = NeedTransformLayout(
......
This diff is collapsed.
@@ -118,6 +118,36 @@
   func : atanh_grad
   inplace : (out_grad -> x_grad)
 
+- backward_op : bicubic_interp_grad
+  forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  no_need_buffer : x
+  kernel :
+    func : bicubic_interp_grad
+    data_type : output_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
+- backward_op : bilinear_interp_grad
+  forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  no_need_buffer : x
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bilinear_interp_grad
+    data_type : output_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - backward_op : bmm_grad
   forward : bmm (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
 
@@ -763,6 +793,21 @@
   kernel :
     func : lgamma_grad
 
+- backward_op : linear_interp_grad
+  forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  no_need_buffer : x
+  kernel :
+    func : linear_interp_grad
+    data_type : output_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - backward_op : log10_grad
   forward : log10 (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
 
@@ -943,6 +988,21 @@
   kernel :
     func : mv_grad
 
+- backward_op : nearest_interp_grad
+  forward : nearest_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  no_need_buffer : x
+  kernel :
+    func : nearest_interp_grad
+    data_type : output_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - backward_op : nll_loss_grad
   forward : nll_loss (Tensor input, Tensor label, Tensor weight, int64_t ignore_index = -100, str reduction = "mean") -> Tensor(out), Tensor(total_weight)
   args : (Tensor input, Tensor label, Tensor weight, Tensor total_weight, Tensor out_grad, int64_t ignore_index, str reduction)
 
@@ -1576,6 +1636,21 @@
   data_type : out_grad
   no_need_buffer : x
 
+- backward_op : trilinear_interp_grad
+  forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  no_need_buffer : x
+  kernel :
+    func : trilinear_interp_grad
+    data_type : output_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - backward_op : trunc_grad
   forward : trunc (Tensor input) -> Tensor(out)
   args : (Tensor out_grad)
......
@@ -143,30 +143,6 @@
   func : bce_loss_grad
   inplace : (out_grad -> input_grad)
 
-- backward_op : bicubic_interp_grad
-  forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : bicubic_interp_grad
-    data_type : output_grad
-
-- backward_op : bilinear_interp_grad
-  forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : bilinear_interp_grad
-    data_type : output_grad
-
 - backward_op : bilinear_tensor_product_grad
   forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
 
@@ -654,18 +630,6 @@
   no_need_buffer : bias
   optional : scale, bias
 
-- backward_op : linear_interp_grad
-  forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : linear_interp_grad
-    data_type : output_grad
-
 - backward_op : log_softmax_grad
   forward : log_softmax(Tensor x, int axis) -> Tensor(out)
   args : (Tensor out, Tensor out_grad, int axis)
 
@@ -884,18 +848,6 @@
     func : multiply_triple_grad
   optional : fwd_grad_grad_x, fwd_grad_grad_y, grad_x_grad, grad_y_grad, grad_grad_out_grad
 
-- backward_op : nearest_interp_grad
-  forward : nearest_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : nearest_interp_grad
-    data_type : output_grad
-
 - backward_op : norm_grad
   forward : norm (Tensor x, int axis, float epsilon, bool is_test) -> Tensor(out), Tensor(norm)
   args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
 
@@ -1370,18 +1322,6 @@
   kernel :
     func : tril_grad
 
-- backward_op : trilinear_interp_grad
-  forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : trilinear_interp_grad
-    data_type : output_grad
-
 - backward_op : triu_grad
   forward : triu(Tensor x, int diagonal) -> Tensor(out)
   args : (Tensor out_grad, int diagonal)
......
@@ -241,28 +241,6 @@
   func : bce_loss
   backward : bce_loss_grad
 
-- op : bicubic_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : bicubic_interp
-    data_type : x
-  backward : bicubic_interp_grad
-
-- op : bilinear_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : bilinear_interp
-    data_type : x
-  backward : bilinear_interp_grad
-
 - op : bilinear_tensor_product
   args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
   output : Tensor
 
@@ -933,17 +911,6 @@
   kernel :
     func : less_than
 
-- op : linear_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : linear_interp
-    data_type : x
-  backward : linear_interp_grad
-
 - op : linspace
   args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
   output : Tensor(out)
 
@@ -1231,17 +1198,6 @@
     multiply_sr {selected_rows, dense -> selected_rows}
   backward : multiply_grad
 
-- op : nearest_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : nearest_interp
-    data_type : x
-  backward : nearest_interp_grad
-
 - op : nms
   args : (Tensor x, float threshold)
   output : Tensor(out)
 
@@ -1733,17 +1689,6 @@
   data_type : dtype
   backend : place
 
-- op : trilinear_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : trilinear_interp
-    data_type : x
-  backward : trilinear_interp_grad
-
 - op : triu
   args : (Tensor x, int diagonal)
   output : Tensor(out)
......
@@ -163,11 +163,19 @@
 - op : bicubic_interp (bicubic_interp_v2)
   backward : bicubic_interp_grad (bicubic_interp_v2_grad)
+  inputs :
+    {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale}
+  outputs :
+    output : Out
   extra :
     attrs : [bool use_mkldnn = false]
 
 - op : bilinear_interp (bilinear_interp_v2)
   backward : bilinear_interp_grad (bilinear_interp_v2_grad)
+  inputs :
+    {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale}
+  outputs :
+    output : Out
   extra :
     attrs : [bool use_mkldnn = false]
 
@@ -983,6 +991,10 @@
 - op : linear_interp (linear_interp_v2)
   backward : linear_interp_grad (linear_interp_v2_grad)
+  inputs :
+    {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale}
+  outputs :
+    output : Out
   extra :
     attrs : [bool use_mkldnn = false]
 
@@ -1205,6 +1217,10 @@
 - op : nearest_interp (nearest_interp_v2)
   backward : nearest_interp_grad (nearest_interp_v2_grad)
+  inputs :
+    {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale}
+  outputs :
+    output : Out
   extra :
     attrs : [bool use_mkldnn = false]
 
@@ -1800,6 +1816,10 @@
 - op : trilinear_interp (trilinear_interp_v2)
   backward : trilinear_interp_grad (trilinear_interp_v2_grad)
+  inputs :
+    {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale}
+  outputs :
+    output : Out
   extra :
     attrs : [bool use_mkldnn = false]
......
@@ -125,6 +125,32 @@
   kernel :
     func : bernoulli
 
+- op : bicubic_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bicubic_interp
+    data_type : x
+  backward : bicubic_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
+- op : bilinear_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bilinear_interp
+    data_type : x
+  backward : bilinear_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - op : bitwise_and
   args : (Tensor x, Tensor y)
   output : Tensor(out)
 
@@ -780,6 +806,19 @@
   func : lgamma
   backward : lgamma_grad
 
+- op : linear_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : linear_interp
+    data_type : x
+  backward : linear_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - op : log
   args : (Tensor x)
   output : Tensor
 
@@ -938,6 +977,19 @@
   func : mv
   backward : mv_grad
 
+- op : nearest_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : nearest_interp
+    data_type : x
+  backward : nearest_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - op : nll_loss
   args : (Tensor input, Tensor label, Tensor weight, int64_t ignore_index = -100, str reduction = "mean")
   output : Tensor(out), Tensor(total_weight)
 
@@ -1406,6 +1458,19 @@
   func : trace
   backward : trace_grad
 
+- op : trilinear_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : trilinear_interp
+    data_type : x
+  backward : trilinear_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+
 - op : trunc
   args : (Tensor input)
   output : Tensor
......
@@ -101,6 +101,7 @@ cc_library(
   infermeta_utils
   SRCS infermeta_utils.cc
   DEPS meta_tensor)
 cc_library(
   selected_rows
   SRCS selected_rows_impl.cc selected_rows.cc
......
@@ -6,6 +6,7 @@ cc_library(
   op_utils
   SRCS op_utils.cc
   DEPS arg_map_context enforce)
+cc_library(get_kerneltype_forvar_utils SRCS get_kerneltype_forvar_utils.cc)
 
 set(convert_utils_deps data_type place op_utils phi_backends)
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
namespace phi {
const std::string& GetKernelTypeForVarContext::GetVarName(void) const {
return *var_name_;
}
const DenseTensor& GetKernelTypeForVarContext::GetTensor(void) const {
return *tensor_;
}
const KernelKey& GetKernelTypeForVarContext::GetKernelKey(void) const {
return *kernel_key_;
}
const AttributeMap& GetKernelTypeForVarContext::GetAttrs(void) const {
return *attrs_;
}
void GetKernelTypeForVarContext::SetVarName(std::string* var_name) {
this->var_name_ = var_name;
}
void GetKernelTypeForVarContext::SetDenseTensor(DenseTensor* tensor) {
this->tensor_ = tensor;
}
} // namespace phi
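The setters exist so the framework can build one context per op and then re-point it at each input variable inside the data-preparation loop; a hedged sketch of that reuse pattern (input_names and tensor_for are hypothetical stand-ins for the caller's real lookup):

    phi::GetKernelTypeForVarContext ctx(&kernel_key, &phi_attrs);
    for (auto& in_name : input_names) {
      ctx.SetVarName(&in_name);                 // re-point, no copy
      ctx.SetDenseTensor(tensor_for(in_name));  // hypothetical lookup
      phi::KernelKey key = kernel->get_kerneltype_forvar_fn_(&ctx);
    }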
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/attribute.h"
namespace phi {
class KernelKey;
class DenseTensor;
/**
 * Note: GetKernelTypeForVarContext is currently designed for MKLDNN kernels
 * whose related member function 'GetKernelTypeForVar' is special. It may
 * be used for other custom hardware in the future.
 */
class GetKernelTypeForVarContext {
public:
GetKernelTypeForVarContext() = default;
GetKernelTypeForVarContext(const GetKernelTypeForVarContext&) = default;
explicit GetKernelTypeForVarContext(const phi::KernelKey* kernel_key,
const AttributeMap* attrs)
: kernel_key_(kernel_key), attrs_(attrs) {}
const std::string& GetVarName(void) const;
const DenseTensor& GetTensor(void) const;
const KernelKey& GetKernelKey(void) const;
const AttributeMap& GetAttrs(void) const;
void SetVarName(std::string* var_name);
void SetDenseTensor(DenseTensor* tensor);
private:
const KernelKey* kernel_key_; // not owned
// Use AttributeMap in namespace 'phi' to avoid depending on 'fluid'
const AttributeMap* attrs_; // not owned
std::string* var_name_; // not owned
DenseTensor* tensor_; // not owned
};
typedef KernelKey (*GetKernelTypeForVarFn)(const GetKernelTypeForVarContext*);
} // namespace phi
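For orientation, a hypothetical hook written against this interface might look like the sketch below; the op name and the special-cased variable are made up here, while the real oneDNN interpolate example appears later in this commit:

    #include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"

    namespace phi {
    // Hypothetical hook: auxiliary size inputs keep the expected layout and
    // dtype on any backend; everything else follows the tensor itself.
    KernelKey MyOpGetKernelTypeForVar(const GetKernelTypeForVarContext* ctx) {
      if (ctx->GetVarName() == "OutSize") {
        return KernelKey(Backend::ALL_BACKEND,
                         ctx->GetKernelKey().layout(),
                         ctx->GetKernelKey().dtype());
      }
      return KernelKey(ctx->GetTensor().place(),
                       ctx->GetTensor().layout(),
                       ctx->GetKernelKey().dtype());
    }
    }  // namespace phi

A kernel opts in by assigning such a function to kernel->get_kerneltype_forvar_fn_ inside its PD_REGISTER_KERNEL body.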
@@ -24,12 +24,12 @@
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/core/compat/convert_utils.h"
+#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/type_defs.h"
 #include "paddle/phi/core/utils/data_type.h"
 #include "paddle/utils/flat_hash_map.h"
 #include "paddle/utils/small_vector.h"
 
 namespace phi {
 
 using DataType = paddle::experimental::DataType;
@@ -286,6 +286,8 @@ class Kernel {
     return kernel_registered_type_;
   }
 
+  GetKernelTypeForVarFn get_kerneltype_forvar_fn_{nullptr};
+
  private:
   KernelFn fn_{nullptr};
   void* variadic_fn_ = nullptr;
......
@@ -154,7 +154,12 @@ file(GLOB kernel_xpu "xpu/*.cc" "selected_rows/xpu/*.cc" "fusion/xpu/*.cc")
 add_library(phi_cpu ${kernel_cc})
 kernel_declare("${kernel_cc}")
-target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS})
+if(WITH_MKLDNN)
+  target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS}
+                        get_kerneltype_forvar_utils)
+else()
+  target_link_libraries(phi_cpu ${COMMON_KERNEL_DEPS})
+endif()
 
 set(ADD_PHI_KERNELS phi_cpu)
......
@@ -15,11 +15,41 @@
 #include "paddle/phi/kernels/interpolate_kernel.h"
 
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
+#include "paddle/phi/core/compat/get_kerneltype_forvar_utils.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/interpolate_function.h"
 
 namespace phi {
 
+KernelKey InterpolateGetKernelTypeForVar(
+    const GetKernelTypeForVarContext* ctx) {
+  const std::string& var_name = ctx->GetVarName();
+  const DenseTensor& tensor = ctx->GetTensor();
+  const KernelKey& expected_kernel_type = ctx->GetKernelKey();
+  const AttributeMap& attrs = ctx->GetAttrs();
+  // Only the input requires reshaping; weights and bias have their shape
+  // in NCHW order.
+  if ((expected_kernel_type.layout() == DataLayout::ONEDNN) &&
+      (tensor.layout() != DataLayout::ONEDNN)) {
+    auto it = attrs.find("data_layout");
+    const std::string data_layout = PADDLE_GET_CONST(std::string, it->second);
+    auto dl = StringToDataLayout(data_layout);
+    // Some models may have intentionally set "AnyLayout" for the pool
+    // op. Treat this as NCHW (the default data_format value).
+    if (dl != DataLayout::kAnyLayout) {
+      return KernelKey(tensor.place(), dl, expected_kernel_type.dtype());
+    }
+  }
+  if (var_name == "OutSize" || var_name == "SizeTensor" ||
+      var_name == "Scale") {
+    return KernelKey(Backend::ALL_BACKEND,
+                     expected_kernel_type.layout(),
+                     expected_kernel_type.dtype());
+  }
+  return KernelKey(
+      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
+}
+
 namespace funcs {
 template <typename T = float>
 class InterpolateOneDNNHandler
@@ -233,7 +263,9 @@ PD_REGISTER_KERNEL(bilinear_interp,
                    phi::BilinearInterpKernel,
                    float,
                    phi::dtype::bfloat16,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16) {
+  kernel->get_kerneltype_forvar_fn_ = phi::InterpolateGetKernelTypeForVar;
+}
 
 PD_REGISTER_KERNEL(nearest_interp,
                    OneDNN,
@@ -243,4 +275,6 @@ PD_REGISTER_KERNEL(nearest_interp,
                    phi::dtype::bfloat16,
                    phi::dtype::float16,
                    int8_t,
-                   uint8_t) {}
+                   uint8_t) {
+  kernel->get_kerneltype_forvar_fn_ = phi::InterpolateGetKernelTypeForVar;
+}
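Both registrations above install the same hook. The body of PD_REGISTER_KERNEL runs once at registration time with `kernel` bound to the phi::Kernel object being registered, so the assignment persists on exactly the object the executor later inspects; roughly (a simplified view, not literal macro output):

    // Inside the registration body, `kernel` is the phi::Kernel being set up.
    kernel->get_kerneltype_forvar_fn_ = phi::InterpolateGetKernelTypeForVar;
    // Later, PrepareData()/ApplyDataTransform() test this pointer to decide
    // whether to route GetKernelTypeForVar through the phi-side hook.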
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature BilinearInterpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bilinear_interp",
{"X", "OutSize", "SizeTensor", "Scale"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"Out"});
}
KernelSignature NearestInterpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("nearest_interp",
{"X", "OutSize", "SizeTensor", "Scale"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"Out"});
}
KernelSignature TrilinearInterpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("trilinear_interp",
{"X", "OutSize", "SizeTensor", "Scale"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"Out"});
}
KernelSignature LinearInterpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("linear_interp",
{"X", "OutSize", "SizeTensor", "Scale"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"Out"});
}
KernelSignature BicubicInterpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bicubic_interp",
{"X", "OutSize", "SizeTensor", "Scale"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"Out"});
}
KernelSignature BilinearInterpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bilinear_interp_grad",
{"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"X@GRAD"});
}
KernelSignature NearestInterpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("nearest_interp_grad",
{"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"X@GRAD"});
}
KernelSignature TrilinearInterpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("trilinear_interp_grad",
{"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"X@GRAD"});
}
KernelSignature LinearInterpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("linear_interp_grad",
{"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"X@GRAD"});
}
KernelSignature BicubicInterpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bicubic_interp_grad",
{"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
{"data_layout",
"out_d",
"out_h",
"out_w",
"scale",
"interp_method",
"align_corners",
"align_mode"},
{"X@GRAD"});
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2, linear_interp);
PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2, bilinear_interp);
PD_REGISTER_BASE_KERNEL_NAME(trilinear_interp_v2, trilinear_interp);
PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2, nearest_interp);
PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2, bicubic_interp);
PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2_grad, linear_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2_grad, bilinear_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(trilinear_interp_v2_grad, trilinear_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2_grad, nearest_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2_grad, bicubic_interp_grad);
PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2,
phi::BilinearInterpOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(nearest_interp_v2,
phi::NearestInterpOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(trilinear_interp_v2,
phi::TrilinearInterpOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(linear_interp_v2,
phi::LinearInterpOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(bicubic_interp_v2,
phi::BicubicInterpOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2_grad,
phi::BilinearInterpGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(nearest_interp_v2_grad,
phi::NearestInterpGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(trilinear_interp_v2_grad,
phi::TrilinearInterpGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(linear_interp_v2_grad,
phi::LinearInterpGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(bicubic_interp_v2_grad,
phi::BicubicInterpGradOpArgumentMapping);