/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/api/lib/api_custom_impl.h"

#include "glog/logging.h"
#include "paddle/phi/api/lib/api_gen_utils.h"
#include "paddle/phi/api/lib/data_transform.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/api/lib/tensor_copy.h"
#include "paddle/phi/common/type_traits.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/infermeta/multiary.h"
#include "paddle/phi/infermeta/nullary.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace experimental {

////////////////// Forward api impls //////////////////////

Tensor copy_to_impl(const Tensor& x, Place place, bool blocking) {
  Tensor out;
  copy(x, place, blocking, &out);
  return out;
}

Tensor embedding_impl(const Tensor& x,
                      const Tensor& weight,
                      int64_t padding_idx,
                      bool sparse) {
  DataType kernel_data_type = ParseDataType(weight);
  auto kernel_key_set = ParseKernelKeyByInputArgs(weight);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  VLOG(6) << "embedding API kernel key: [" << kernel_key.backend() << ", "
          << kernel_key.layout() << ", " << kernel_data_type << "]";

  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());

  Tensor api_output;

  // A dense weight and a SelectedRows weight dispatch to different kernels.
  if (phi::DenseTensor::classof(weight.impl().get())) {
    auto kernel_result =
        phi::KernelFactory::Instance().SelectKernelOrThrowError(
            "embedding",
            {kernel_key.backend(), kernel_key.layout(), kernel_data_type});
    const auto& kernel = kernel_result.kernel;
    VLOG(6) << "embedding API kernel: " << kernel;

    auto input_x = PrepareData(x, kernel.InputAt(0), {});
    auto input_weight = PrepareData(weight, kernel.InputAt(1), {});

    auto* kernel_out = SetKernelOutput(&api_output);
    phi::MetaTensor meta_out(kernel_out);

    phi::EmbeddingInferMeta(MakeMetaTensor(*input_x),
                            MakeMetaTensor(*input_weight),
                            padding_idx,
                            sparse,
                            &meta_out);

    using kernel_signature = void (*)(const platform::DeviceContext&,
                                      const phi::DenseTensor&,
                                      const phi::DenseTensor&,
                                      int64_t,
                                      phi::DenseTensor*);
    auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
    {
      (*kernel_fn)(*dev_ctx, *input_x, *input_weight, padding_idx, kernel_out);
    }
  } else {
    auto kernel_result =
        phi::KernelFactory::Instance().SelectKernelOrThrowError(
            "sparse_weight_embedding",
            {kernel_key.backend(), kernel_key.layout(), kernel_data_type});
    const auto& kernel = kernel_result.kernel;
    VLOG(6) << "sparse_weight_embedding API kernel: " << kernel;

    auto input_x = PrepareData(x, kernel.InputAt(0), {});
    auto input_weight = TensorToSelectedRows(weight);

    auto* kernel_out = SetKernelOutput(&api_output);
    phi::MetaTensor meta_out(kernel_out);

    phi::EmbeddingInferMeta(MakeMetaTensor(*input_x),
                            MakeMetaTensor(*input_weight),
                            padding_idx,
                            sparse,
                            &meta_out);

    using kernel_signature = void (*)(const platform::DeviceContext&,
                                      const phi::DenseTensor&,
                                      const phi::SelectedRows&,
                                      int64_t,
                                      phi::DenseTensor*);
    auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
    {
      (*kernel_fn)(*dev_ctx, *input_x, *input_weight, padding_idx, kernel_out);
    }
  }
  return api_output;
}

std::vector<Tensor> split_impl(const Tensor& x,
                               const IntArray& num_or_sections,
                               const Scalar& axis) {
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();

  Backend kernel_backend = kernel_key.backend();
  DataLayout kernel_layout = kernel_key.layout();
  DataType kernel_data_type = kernel_key.dtype();

  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "split", {kernel_backend, kernel_layout, kernel_data_type});
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "split API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "split API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto dense_x = PrepareData(x, kernel.InputAt(0), {});

  // Calculate the number of out tensors
  size_t out_number;
  if (num_or_sections.size() == 1) {
    if (num_or_sections.GetData()[0] < 0) {
      out_number = 1;
    } else {
      out_number = num_or_sections.GetData()[0];
    }
  } else {
    out_number = num_or_sections.size();
  }

  std::vector<Tensor> out;
  auto dense_outs = SetKernelOutput(out_number, &out);
  std::vector<phi::MetaTensor> meta_outs;
  meta_outs.reserve(out_number);
  std::vector<phi::MetaTensor*> meta_out_ptrs;
  meta_out_ptrs.reserve(out_number);
  for (size_t i = 0; i < out_number; ++i) {
    meta_outs.push_back(dense_outs[i]);
    meta_out_ptrs.push_back(&meta_outs.back());
  }

  phi::SplitInferMeta(
      MakeMetaTensor(*dense_x), num_or_sections, axis, meta_out_ptrs);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::IntArray&,
                                    const phi::Scalar&,
                                    std::vector<phi::DenseTensor*>&);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx,
               *dense_x,
               phi::IntArray(num_or_sections),
               phi::Scalar(axis),
               dense_outs);

  return out;
}

////////////////// Backward(grad) api impls //////////////////////

std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> batch_norm_impl(
    const Tensor& x,
    const Tensor& scale,
    const Tensor& bias,
    const Tensor& mean,
    const Tensor& variance,
    float momentum,
    float epsilon,
    const std::string& data_layout,
    bool is_test,
    bool use_global_stats,
    bool trainable_statistics,
    bool fuse_with_relu) {
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;

  kernel_data_type = ParseDataType(x);

  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(x);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "batch_norm", {kernel_backend, kernel_layout, kernel_data_type});
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "batch_norm API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "batch_norm API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto input_x = PrepareData(x, kernel.InputAt(0), {});
  auto input_scale = PrepareData(scale, kernel.InputAt(1), {});
  auto input_bias = PrepareData(bias, kernel.InputAt(2), {});
  auto input_mean = PrepareData(mean, kernel.InputAt(3), {});
  auto input_variance = PrepareData(variance, kernel.InputAt(4), {});

  std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> api_output;
  auto kernel_out_0 = SetKernelOutput(&std::get<0>(api_output));
  // mean_out and variance_out reuse the storage of the input mean/variance,
  // so the running statistics are updated in place.
  std::get<1>(api_output).set_impl(mean.impl());
  std::get<2>(api_output).set_impl(variance.impl());
  auto kernel_out_1 = SetKernelOutput(&std::get<1>(api_output));
  auto kernel_out_2 = SetKernelOutput(&std::get<2>(api_output));
  auto kernel_out_3 = SetKernelOutput(&std::get<3>(api_output));
  auto kernel_out_4 = SetKernelOutput(&std::get<4>(api_output));
  auto kernel_out_5 = SetKernelOutput(&std::get<5>(api_output));

  phi::MetaTensor meta_out_0(kernel_out_0);
  phi::MetaTensor meta_out_1(kernel_out_1);
  phi::MetaTensor meta_out_2(kernel_out_2);
  phi::MetaTensor meta_out_3(kernel_out_3);
  phi::MetaTensor meta_out_4(kernel_out_4);
  phi::MetaTensor meta_out_5(kernel_out_5);

  phi::BatchNormInferMeta(MakeMetaTensor(*input_x),
                          MakeMetaTensor(*input_scale),
                          MakeMetaTensor(*input_bias),
                          MakeMetaTensor(*input_mean),
                          MakeMetaTensor(*input_variance),
                          momentum,
                          epsilon,
                          data_layout,
                          is_test,
                          use_global_stats,
                          trainable_statistics,
                          fuse_with_relu,
                          &meta_out_0,
                          &meta_out_1,
                          &meta_out_2,
                          &meta_out_3,
                          &meta_out_4,
                          &meta_out_5);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    float,
                                    float,
                                    const std::string&,
                                    bool,
                                    bool,
                                    bool,
                                    bool,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  {
    (*kernel_fn)(*dev_ctx,
                 *input_x,
                 *input_scale,
                 *input_bias,
                 *input_mean,
                 *input_variance,
                 momentum,
                 epsilon,
                 data_layout,
                 is_test,
                 use_global_stats,
                 trainable_statistics,
                 fuse_with_relu,
                 kernel_out_0,
                 kernel_out_1,
                 kernel_out_2,
                 kernel_out_3,
                 kernel_out_4,
                 kernel_out_5);
  }

  return api_output;
}

void imag_grad_impl(const Tensor& out_grad, Tensor* x_grad) {
  phi::KernelKey kernel_key{ParseBackend(out_grad),
                            out_grad.layout(),
                            phi::dtype::ToComplex(out_grad.dtype())};
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "imag_grad", kernel_key);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "imag_grad API kernel key: " << kernel_key;
  VLOG(6) << "imag_grad API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());

  auto dense_out_grad = TensorToDenseTensor(out_grad);

  auto kernel_out = SetKernelOutput(x_grad);
  phi::MetaTensor meta_out(kernel_out);
  phi::RealAndImagGradInferMeta(*dense_out_grad, &meta_out);

  using kernel_signature = void (*)(
      const phi::DeviceContext&, const phi::DenseTensor&, phi::DenseTensor*);

  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx, *dense_out_grad, kernel_out);
}

void embedding_grad_impl(const Tensor& x,
                         const Tensor& weight,
                         const Tensor& out_grad,
                         int64_t padding_idx,
                         bool sparse,
                         Tensor* weight_grad) {
  DataType kernel_data_type = ParseDataType(weight);
  auto kernel_key_set = ParseKernelKeyByInputArgs(weight);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  VLOG(6) << "embedding_grad API kernel key: [" << kernel_key.backend() << ", "
          << kernel_key.layout() << ", " << kernel_data_type << "]";

  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());

  // The kernel is chosen by both the weight type (dense vs. SelectedRows)
  // and the `sparse` flag, which controls whether the gradient itself is
  // produced as a dense tensor or as SelectedRows.
  if (phi::DenseTensor::classof(weight.impl().get())) {
    std::string kernel_name =
        sparse ? "embedding_sparse_grad" : "embedding_grad";
    auto kernel_result =
        phi::KernelFactory::Instance().SelectKernelOrThrowError(
            kernel_name,
            {kernel_key.backend(), kernel_key.layout(), kernel_data_type});
    const auto& kernel = kernel_result.kernel;
    VLOG(6) << kernel_name << " API kernel: " << kernel;

    auto input_x = PrepareData(x, kernel.InputAt(0), {});
    auto input_weight = PrepareData(weight, kernel.InputAt(1), {});
    auto input_out_grad = PrepareData(out_grad, kernel.InputAt(2), {});

    if (sparse) {
      auto* kernel_out = SetSelectedRowsKernelOutput(weight_grad);
      phi::MetaTensor meta_out(kernel_out);
      meta_out.set_dims(input_weight->dims());
      meta_out.set_dtype(input_weight->dtype());
      kernel_out->set_height(input_weight->dims()[0]);

      using kernel_signature = void (*)(const platform::DeviceContext&,
                                        const phi::DenseTensor&,
                                        const phi::DenseTensor&,
                                        const phi::DenseTensor&,
                                        int64_t,
                                        phi::SelectedRows*);
      auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
      (*kernel_fn)(*dev_ctx,
                   *input_x,
                   *input_weight,
                   *input_out_grad,
                   padding_idx,
                   kernel_out);
    } else {
      auto* kernel_out = SetKernelOutput(weight_grad);
      phi::MetaTensor meta_out(kernel_out);
      phi::UnchangedInferMeta(MakeMetaTensor(*input_weight), &meta_out);
      using kernel_signature = void (*)(const platform::DeviceContext&,
                                        const phi::DenseTensor&,
                                        const phi::DenseTensor&,
                                        const phi::DenseTensor&,
                                        int64_t,
                                        phi::DenseTensor*);
      auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
      (*kernel_fn)(*dev_ctx,
                   *input_x,
                   *input_weight,
                   *input_out_grad,
                   padding_idx,
                   kernel_out);
    }
  } else {
    std::string kernel_name = sparse ? "sparse_weight_embedding_sparse_grad"
                                     : "sparse_weight_embedding_grad";
    auto kernel_result =
        phi::KernelFactory::Instance().SelectKernelOrThrowError(
            kernel_name,
            {kernel_key.backend(), kernel_key.layout(), kernel_data_type});
    const auto& kernel = kernel_result.kernel;
    VLOG(6) << kernel_name << " API kernel: " << kernel;

    auto input_x = PrepareData(x, kernel.InputAt(0), {});
    auto input_weight = TensorToSelectedRows(weight);
    auto input_out_grad = PrepareData(out_grad, kernel.InputAt(2), {});

    if (sparse) {
      auto* kernel_out = SetSelectedRowsKernelOutput(weight_grad);
      phi::MetaTensor meta_out(kernel_out);
      phi::UnchangedInferMeta(MakeMetaTensor(*input_weight), &meta_out);
      using kernel_signature = void (*)(const platform::DeviceContext&,
                                        const phi::DenseTensor&,
                                        const phi::SelectedRows&,
                                        const phi::DenseTensor&,
                                        int64_t,
                                        phi::SelectedRows*);
      auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
      (*kernel_fn)(*dev_ctx,
                   *input_x,
                   *input_weight,
                   *input_out_grad,
                   padding_idx,
                   kernel_out);
    } else {
      auto* kernel_out = SetKernelOutput(weight_grad);
      phi::MetaTensor meta_out(kernel_out);
      meta_out.set_dims(input_weight->GetCompleteDims());
      meta_out.set_dtype(input_weight->dtype());
      using kernel_signature = void (*)(const platform::DeviceContext&,
                                        const phi::DenseTensor&,
                                        const phi::SelectedRows&,
                                        const phi::DenseTensor&,
                                        int64_t,
                                        phi::DenseTensor*);
      auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
      (*kernel_fn)(*dev_ctx,
                   *input_x,
                   *input_weight,
                   *input_out_grad,
                   padding_idx,
                   kernel_out);
    }
  }
}

void real_grad_impl(const Tensor& out_grad, Tensor* x_grad) {
  phi::KernelKey kernel_key{ParseBackend(out_grad),
                            out_grad.layout(),
                            phi::dtype::ToComplex(out_grad.dtype())};
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "real_grad", kernel_key);
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "real_grad API kernel key: " << kernel_key;
  VLOG(6) << "real_grad API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());

  auto dense_out_grad =
      TensorToDenseTensor(out_grad);

  auto kernel_out = SetKernelOutput(x_grad);
  phi::MetaTensor meta_out(kernel_out);
  phi::RealAndImagGradInferMeta(*dense_out_grad, &meta_out);

  using kernel_signature = void (*)(
      const phi::DeviceContext&, const phi::DenseTensor&, phi::DenseTensor*);

  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx, *dense_out_grad, kernel_out);
}

}  // namespace experimental
}  // namespace paddle
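// Illustrative usage sketch (not part of the original file): these custom
// impls are normally reached through the generated C++ API rather than
// called directly. Assuming a ready-made int64 index Tensor `ids` and a
// dense float weight Tensor `table` (both hypothetical names), a direct
// call would look roughly like:
//
//   auto emb = paddle::experimental::embedding_impl(
//       ids, table, /*padding_idx=*/-1, /*sparse=*/false);
//   auto parts = paddle::experimental::split_impl(
//       emb, IntArray({2}), Scalar(1));
//
// Each impl follows the same dispatch pattern: parse a kernel key from the
// inputs, select the phi kernel from KernelFactory, run the InferMeta
// function to set the output shapes and dtypes, then invoke the kernel
// through its variadic function pointer.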