未验证 提交 adb61b7b 编写于 作者: Z zyfncg 提交者: GitHub

Remove some custom_impl api (#45066)

* remove some custom_impl api and make them generated by yaml completely

* delete useless code

* fix adamw bug

* fix infermeta

* revert adamw

* polish code

* fix bug
上级 49e2a4d8
......@@ -337,7 +337,8 @@ add_custom_command(
${api_yaml_file} ${legacy_api_yaml_file} --wrapped_infermeta_header_path
${wrapped_infermeta_header_file} --wrapped_infermeta_source_path
${wrapped_infermeta_source_file}
DEPENDS ${api_yaml_file} ${wrapped_infermeta_gen_file} ${api_gen_base}
DEPENDS ${api_yaml_file} ${legacy_api_yaml_file} ${wrapped_infermeta_gen_file}
${api_gen_base}
VERBATIM)
cc_library(
......
......@@ -198,416 +198,6 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> adamw_impl(
return api_output;
}
// Manually-written dispatch for the conv2d forward API: resolves the kernel
// key (backend / layout / dtype) from the inputs, prepares dense-tensor data,
// runs ConvInferMeta on the output, then invokes the "conv2d" kernel.
// NOTE: the "paddding_algorithm" spelling (three d's) matches the declared
// API surface and must not be changed here.
Tensor conv2d_impl(const Tensor& input,
                   const Tensor& filter,
                   const std::vector<int>& strides,
                   const std::vector<int>& paddings,
                   const std::string& paddding_algorithm,
                   int groups,
                   const std::vector<int>& dilations,
                   const std::string& data_format,
                   bool use_addto,
                   int workspace_size_MB,
                   bool exhaustive_search) {
  // Kernel-key resolution: dtype is parsed from `input`; backend and layout
  // fall back to the highest-priority key derived from both tensor args.
  Backend backend = Backend::UNDEFINED;
  DataLayout layout = DataLayout::UNDEFINED;
  DataType dtype = ParseDataType(input);
  if (backend == Backend::UNDEFINED || layout == DataLayout::UNDEFINED ||
      dtype == DataType::UNDEFINED) {
    auto key_set = ParseKernelKeyByInputArgs(input, filter);
    auto fallback_key = key_set.GetHighestPriorityKernelKey();
    if (backend == Backend::UNDEFINED) {
      backend = fallback_key.backend();
    }
    if (layout == DataLayout::UNDEFINED) {
      layout = fallback_key.layout();
    }
    if (dtype == DataType::UNDEFINED) {
      dtype = fallback_key.dtype();
    }
  }
  VLOG(6) << "conv2d API kernel key: [" << backend << ", " << layout << ", "
          << dtype << "]";

  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "conv2d", {backend, layout, dtype}, true);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "conv2d API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(backend);

  // On GPU, pin both input arg-defs to the GPU backend before data prep.
  phi::TensorArgDef input_def = kernel.InputAt(0);
  phi::TensorArgDef filter_def = kernel.InputAt(1);
  if (backend == Backend::GPU) {
    input_def.backend = Backend::GPU;
    filter_def.backend = Backend::GPU;
  }
  auto dense_input = PrepareData(input, input_def, {});
  auto dense_filter = PrepareData(filter, filter_def, {});

  Tensor api_output;
  auto kernel_out = SetKernelOutput(backend, &api_output);
  phi::MetaTensor meta_out(kernel_out);
  phi::ConvInferMeta(MakeMetaTensor(*dense_input),
                     MakeMetaTensor(*dense_filter),
                     strides,
                     paddings,
                     paddding_algorithm,
                     groups,
                     dilations,
                     data_format,
                     use_addto,
                     workspace_size_MB,
                     exhaustive_search,
                     &meta_out);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const std::vector<int>&,
                                    const std::vector<int>&,
                                    const std::string&,
                                    int,
                                    const std::vector<int>&,
                                    const std::string&,
                                    bool,
                                    int,
                                    bool,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx,
               *dense_input,
               *dense_filter,
               strides,
               paddings,
               paddding_algorithm,
               groups,
               dilations,
               data_format,
               use_addto,
               workspace_size_MB,
               exhaustive_search,
               kernel_out);
  return api_output;
}
// Manually-written dispatch for the conv3d forward API: resolves the kernel
// key (backend / layout / dtype) from the inputs, prepares dense-tensor data,
// runs ConvInferMeta on the output, then invokes the "conv3d" kernel.
// NOTE: the "paddding_algorithm" spelling (three d's) matches the declared
// API surface and must not be changed here.
Tensor conv3d_impl(const Tensor& input,
                   const Tensor& filter,
                   const std::vector<int>& strides,
                   const std::vector<int>& paddings,
                   const std::string& paddding_algorithm,
                   int groups,
                   const std::vector<int>& dilations,
                   const std::string& data_format,
                   bool use_addto,
                   int workspace_size_MB,
                   bool exhaustive_search) {
  // Kernel-key resolution: dtype is parsed from `input`; backend and layout
  // fall back to the highest-priority key derived from both tensor args.
  Backend backend = Backend::UNDEFINED;
  DataLayout layout = DataLayout::UNDEFINED;
  DataType dtype = ParseDataType(input);
  if (backend == Backend::UNDEFINED || layout == DataLayout::UNDEFINED ||
      dtype == DataType::UNDEFINED) {
    auto key_set = ParseKernelKeyByInputArgs(input, filter);
    auto fallback_key = key_set.GetHighestPriorityKernelKey();
    if (backend == Backend::UNDEFINED) {
      backend = fallback_key.backend();
    }
    if (layout == DataLayout::UNDEFINED) {
      layout = fallback_key.layout();
    }
    if (dtype == DataType::UNDEFINED) {
      dtype = fallback_key.dtype();
    }
  }
  VLOG(6) << "conv3d API kernel key: [" << backend << ", " << layout << ", "
          << dtype << "]";

  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "conv3d", {backend, layout, dtype}, true);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "conv3d API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(backend);

  // On GPU, pin both input arg-defs to the GPU backend before data prep.
  phi::TensorArgDef input_def = kernel.InputAt(0);
  phi::TensorArgDef filter_def = kernel.InputAt(1);
  if (backend == Backend::GPU) {
    input_def.backend = Backend::GPU;
    filter_def.backend = Backend::GPU;
  }
  auto dense_input = PrepareData(input, input_def, {});
  auto dense_filter = PrepareData(filter, filter_def, {});

  Tensor api_output;
  auto kernel_out = SetKernelOutput(backend, &api_output);
  phi::MetaTensor meta_out(kernel_out);
  phi::ConvInferMeta(MakeMetaTensor(*dense_input),
                     MakeMetaTensor(*dense_filter),
                     strides,
                     paddings,
                     paddding_algorithm,
                     groups,
                     dilations,
                     data_format,
                     use_addto,
                     workspace_size_MB,
                     exhaustive_search,
                     &meta_out);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const std::vector<int>&,
                                    const std::vector<int>&,
                                    const std::string&,
                                    int,
                                    const std::vector<int>&,
                                    const std::string&,
                                    bool,
                                    int,
                                    bool,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx,
               *dense_input,
               *dense_filter,
               strides,
               paddings,
               paddding_algorithm,
               groups,
               dilations,
               data_format,
               use_addto,
               workspace_size_MB,
               exhaustive_search,
               kernel_out);
  return api_output;
}
// Manually-written dispatch for the conv2d backward API.
// Resolves the kernel key from (input, filter, out_grad), prepares
// dense-tensor inputs, runs GeneralBinaryGradInferMeta on the requested
// outputs, and invokes the "conv2d_grad" kernel.
// `input_grad` / `filter_grad` are output parameters; either may be null
// (presumably when that gradient is not needed — the corresponding
// MetaTensor is then passed as nullptr to infer-meta).
// NOTE: "paddding_algorithm" (three d's) matches the declared API spelling.
void conv2d_grad_impl(const Tensor& input,
                      const Tensor& filter,
                      const Tensor& out_grad,
                      const std::vector<int>& strides,
                      const std::vector<int>& paddings,
                      const std::string& paddding_algorithm,
                      int groups,
                      const std::vector<int>& dilations,
                      const std::string& data_format,
                      bool use_addto,
                      int workspace_size_MB,
                      bool exhaustive_search,
                      Tensor* input_grad,
                      Tensor* filter_grad) {
  // All three key components start UNDEFINED, so they are always filled in
  // from the highest-priority key parsed from the three tensor arguments.
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;
  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(input, filter, out_grad);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }
  VLOG(6) << "conv2d_grad API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "conv2d_grad", {kernel_backend, kernel_layout, kernel_data_type}, true);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "conv2d_grad API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  // On GPU, pin all three input arg-defs to the GPU backend before data prep.
  phi::TensorArgDef args0 = kernel.InputAt(0);
  phi::TensorArgDef args1 = kernel.InputAt(1);
  phi::TensorArgDef args2 = kernel.InputAt(2);
  if (kernel_backend == Backend::GPU) {
    args0.backend = Backend::GPU;
    args1.backend = Backend::GPU;
    args2.backend = Backend::GPU;
  }
  auto input_input = PrepareData(input, args0, {});
  auto input_filter = PrepareData(filter, args1, {});
  auto input_out_grad = PrepareData(out_grad, args2, {});

  // Kernel outputs mirror the (possibly null) caller-provided grad tensors.
  auto kernel_out_0 = SetKernelOutput(kernel_backend, input_grad);
  auto kernel_out_1 = SetKernelOutput(kernel_backend, filter_grad);
  phi::MetaTensor meta_out_0(kernel_out_0);
  phi::MetaTensor meta_out_1(kernel_out_1);
  // Grad shapes follow the forward inputs; skip meta for absent outputs.
  phi::GeneralBinaryGradInferMeta(MakeMetaTensor(*input_input),
                                  MakeMetaTensor(*input_filter),
                                  kernel_out_0 ? &meta_out_0 : nullptr,
                                  kernel_out_1 ? &meta_out_1 : nullptr);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const std::vector<int>&,
                                    const std::vector<int>&,
                                    const std::string&,
                                    int,
                                    const std::vector<int>&,
                                    const std::string&,
                                    bool,
                                    int,
                                    bool,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  {
    (*kernel_fn)(*dev_ctx,
                 *input_input,
                 *input_filter,
                 *input_out_grad,
                 strides,
                 paddings,
                 paddding_algorithm,
                 groups,
                 dilations,
                 data_format,
                 use_addto,
                 workspace_size_MB,
                 exhaustive_search,
                 kernel_out_0,
                 kernel_out_1);
  }
}
// Manually-written dispatch for the conv3d backward API.
// Resolves the kernel key from (input, filter, out_grad), prepares
// dense-tensor inputs, runs GeneralBinaryGradInferMeta on the requested
// outputs, and invokes the "conv3d_grad" kernel.
// `input_grad` / `filter_grad` are output parameters; either may be null
// (presumably when that gradient is not needed — the corresponding
// MetaTensor is then passed as nullptr to infer-meta).
// NOTE: "paddding_algorithm" (three d's) matches the declared API spelling.
void conv3d_grad_impl(const Tensor& input,
                      const Tensor& filter,
                      const Tensor& out_grad,
                      const std::vector<int>& strides,
                      const std::vector<int>& paddings,
                      const std::string& paddding_algorithm,
                      int groups,
                      const std::vector<int>& dilations,
                      const std::string& data_format,
                      bool use_addto,
                      int workspace_size_MB,
                      bool exhaustive_search,
                      Tensor* input_grad,
                      Tensor* filter_grad) {
  // All three key components start UNDEFINED, so they are always filled in
  // from the highest-priority key parsed from the three tensor arguments.
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;
  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(input, filter, out_grad);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }
  VLOG(6) << "conv3d_grad API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "conv3d_grad", {kernel_backend, kernel_layout, kernel_data_type}, true);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "conv3d_grad API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  // On GPU, pin all three input arg-defs to the GPU backend before data prep.
  phi::TensorArgDef args0 = kernel.InputAt(0);
  phi::TensorArgDef args1 = kernel.InputAt(1);
  phi::TensorArgDef args2 = kernel.InputAt(2);
  if (kernel_backend == Backend::GPU) {
    args0.backend = Backend::GPU;
    args1.backend = Backend::GPU;
    args2.backend = Backend::GPU;
  }
  auto input_input = PrepareData(input, args0, {});
  auto input_filter = PrepareData(filter, args1, {});
  auto input_out_grad = PrepareData(out_grad, args2, {});

  // Kernel outputs mirror the (possibly null) caller-provided grad tensors.
  auto kernel_out_0 = SetKernelOutput(kernel_backend, input_grad);
  auto kernel_out_1 = SetKernelOutput(kernel_backend, filter_grad);
  phi::MetaTensor meta_out_0(kernel_out_0);
  phi::MetaTensor meta_out_1(kernel_out_1);
  // Grad shapes follow the forward inputs; skip meta for absent outputs.
  phi::GeneralBinaryGradInferMeta(MakeMetaTensor(*input_input),
                                  MakeMetaTensor(*input_filter),
                                  kernel_out_0 ? &meta_out_0 : nullptr,
                                  kernel_out_1 ? &meta_out_1 : nullptr);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const std::vector<int>&,
                                    const std::vector<int>&,
                                    const std::string&,
                                    int,
                                    const std::vector<int>&,
                                    const std::string&,
                                    bool,
                                    int,
                                    bool,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  {
    (*kernel_fn)(*dev_ctx,
                 *input_input,
                 *input_filter,
                 *input_out_grad,
                 strides,
                 paddings,
                 paddding_algorithm,
                 groups,
                 dilations,
                 data_format,
                 use_addto,
                 workspace_size_MB,
                 exhaustive_search,
                 kernel_out_0,
                 kernel_out_1);
  }
}
Tensor copy_to_impl(const Tensor& x, Place place, bool blocking) {
Tensor out;
copy(x, place, blocking, &out);
......
......@@ -66,30 +66,6 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> batch_norm_impl(
bool trainable_statistics,
bool fuse_with_relu);
Tensor conv2d_impl(const Tensor& input,
const Tensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& paddding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search);
Tensor conv3d_impl(const Tensor& input,
const Tensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& paddding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search);
Tensor copy_to_impl(const Tensor& x, Place place, bool blocking);
Tensor embedding_impl(const Tensor& x,
......@@ -116,36 +92,6 @@ std::tuple<Tensor, Tensor, Tensor> momentum_impl(
////////////////// Backward(grad) api impls //////////////////////
void conv2d_grad_impl(const Tensor& input,
const Tensor& filter,
const Tensor& out_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& paddding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search,
Tensor* input_grad,
Tensor* filter_grad);
void conv3d_grad_impl(const Tensor& input,
const Tensor& filter,
const Tensor& out_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string& paddding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search,
Tensor* input_grad,
Tensor* filter_grad);
void imag_grad_impl(const Tensor& out_grad, Tensor* x_grad);
void embedding_grad_impl(const Tensor& x,
......
......@@ -513,7 +513,11 @@
- api : conv2d
args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output : Tensor
invoke : conv2d_impl(input, filter, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
infer_meta :
func : ConvInferMeta
kernel :
func : conv2d
use_gpudnn : true
backward : conv2d_grad
- api : conv2d_transpose
......@@ -529,7 +533,11 @@
- api : conv3d
args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output : Tensor
invoke : conv3d_impl(input, filter, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
infer_meta :
func : ConvInferMeta
kernel :
func : conv3d
use_gpudnn : true
backward : conv3d_grad
- api : conv3d_transpose
......
......@@ -438,7 +438,12 @@
forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output : Tensor(input_grad), Tensor(filter_grad)
invoke : conv2d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [input, filter]
kernel :
func : conv2d_grad
use_gpudnn : true
backward : conv2d_grad_grad
- backward_api : conv2d_grad_grad
......@@ -478,7 +483,12 @@
forward : conv3d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output : Tensor(input_grad), Tensor(filter_grad)
invoke : conv3d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [input, filter]
kernel :
func : conv3d_grad
use_gpudnn : true
backward : conv3d_grad_grad
- backward_api : conv3d_grad_grad
......
......@@ -2736,5 +2736,4 @@ void Unpool3dInferMeta(const MetaTensor& x,
} // namespace phi
PD_REGISTER_INFER_META_FN(add_raw, phi::ElementwiseRawInferMeta);
PD_REGISTER_INFER_META_FN(conv2d, phi::ConvInferMeta);
PD_REGISTER_INFER_META_FN(conv2d_infer, phi::ConvInferInferMeta);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册