Unverified commit 34069c46, authored by zhangyuqin1998, committed via GitHub

rename_bilinear_tensor_product (#52375)

* rename_bilinear_tensor_product

* fix
Parent commit: a043d361
......@@ -92,7 +92,7 @@ namespace ops = paddle::operators;
// Bind the infer-shape of the legacy op "bilinear_tensor_product" to the phi
// meta function, which was renamed BilinearTensorProductInferMeta ->
// BilinearInferMeta in this change; only the renamed binding is kept (the
// stale pre-rename PD_INFER_META line left by the diff is removed).
DECLARE_INFER_SHAPE_FUNCTOR(bilinear_tensor_product,
                            BilinearTensorProductInferShapeFunctor,
                            PD_INFER_META(phi::BilinearInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
bilinear_tensor_product_grad,
BilinearTensorProductGradInferShapeFunctor,
......
......@@ -152,7 +152,7 @@
infer_meta :
func : BilinearTensorProductGradInferMeta
kernel :
func : bilinear_tensor_product_grad
func : bilinear_grad
- backward_op : cast_grad
forward : cast (Tensor x, DataType dtype) -> Tensor(out)
......
......@@ -227,9 +227,9 @@
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearTensorProductInferMeta
func : BilinearInferMeta
kernel :
func : bilinear_tensor_product
func : bilinear
optional : bias
backward : bilinear_tensor_product_grad
......
......@@ -695,12 +695,12 @@ void BatchNormInferInferMeta(const MetaTensor& x,
config);
}
void BilinearTensorProductInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
const MetaTensor& bias,
MetaTensor* out,
MetaConfig config) {
void BilinearInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
const MetaTensor& bias,
MetaTensor* out,
MetaConfig config) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto weight_dims = weight.dims();
......
......@@ -198,12 +198,12 @@ void BatchNormInferInferMeta(const MetaTensor& x,
MetaTensor* variance_out,
MetaConfig config = MetaConfig());
// Shape/dtype inference for the bilinear op (renamed from
// BilinearTensorProductInferMeta in the bilinear_tensor_product -> bilinear
// rename); derives out's meta from x, y, weight and bias.
// NOTE(review): parameter semantics assumed from the op definition — the
// implementation body is not visible in this chunk; confirm against the .cc.
// The stale pre-rename prototype is dropped: its definition no longer exists
// after the rename, so keeping it would leave a dangling declaration.
void BilinearInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
                       const MetaTensor& weight,
                       const MetaTensor& bias,
                       MetaTensor* out,
                       MetaConfig config = MetaConfig());
void BroadcastTensorsInferMeta(const std::vector<const MetaTensor*>& x,
std::vector<MetaTensor*> out);
......
......@@ -19,14 +19,14 @@
namespace phi {
// Gradient kernel of the bilinear op (renamed from
// BilinearTensorProductGradKernel). Given the forward inputs x, y, weight and
// the upstream gradient dout, writes the gradients dx, dy, dweight and dbias.
// Output pointers follow the usual phi convention; presumably any may be
// skipped by the framework — confirm against the kernel implementation.
// Only the renamed declaration is kept: in the scraped diff the stale
// pre-rename declaration shared this single `template` header, which would
// leave the second declaration without one (ill-formed).
template <typename T, typename Context>
void BilinearGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        const DenseTensor& weight,
                        const DenseTensor& dout,
                        DenseTensor* dx,
                        DenseTensor* dy,
                        DenseTensor* dweight,
                        DenseTensor* dbias);
} // namespace phi
......@@ -20,11 +20,11 @@
namespace phi {
// Forward kernel of the bilinear op (renamed from
// BilinearTensorProductKernel). Computes out from x, y and weight; bias is
// optional (matches `optional : bias` in the op yaml of this same commit).
// Only the renamed declaration is kept: the stale pre-rename declaration in
// the scraped diff shared this single `template` header (ill-formed) and its
// definition no longer exists after the rename.
template <typename T, typename Context>
void BilinearKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& y,
                    const DenseTensor& weight,
                    const paddle::optional<DenseTensor>& bias,
                    DenseTensor* out);
} // namespace phi
......@@ -12,14 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// CPU registration of the bilinear grad kernel under its post-rename name
// "bilinear_grad". The stale pre-rename include
// (bilinear_tensor_product_kernel.h / ..._kernel_impl.h) and the stale
// PD_REGISTER_KERNEL(bilinear_tensor_product, ...) left by the diff are
// removed; only the renamed, coherent set is kept.
#include "paddle/phi/kernels/bilinear_grad_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/bilinear_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
    bilinear_grad, CPU, ALL_LAYOUT, phi::BilinearGradKernel, float, double) {}
......@@ -12,14 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// CPU registration of the forward bilinear kernel under its post-rename name
// "bilinear" (was "bilinear_tensor_product"). Stale pre-rename include and
// registration lines left by the diff are removed.
#include "paddle/phi/kernels/bilinear_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/bilinear_kernel_impl.h"

PD_REGISTER_KERNEL(
    bilinear, CPU, ALL_LAYOUT, phi::BilinearKernel, float, double) {}
......@@ -12,14 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// GPU registration of the bilinear grad kernel under its post-rename name
// "bilinear_grad" (was "bilinear_tensor_product_grad"). Stale pre-rename
// include and registration lines left by the diff are removed.
#include "paddle/phi/kernels/bilinear_grad_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/bilinear_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
    bilinear_grad, GPU, ALL_LAYOUT, phi::BilinearGradKernel, float, double) {}
......@@ -12,14 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// GPU registration of the forward bilinear kernel under its post-rename name
// "bilinear" (was "bilinear_tensor_product"). Stale pre-rename include and
// registration lines left by the diff are removed; the new side of the hunk
// (bilinear_kernel.h + bilinear_kernel_impl.h + "bilinear") is authoritative.
#include "paddle/phi/kernels/bilinear_kernel.h"

#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/bilinear_kernel_impl.h"

PD_REGISTER_KERNEL(
    bilinear, GPU, ALL_LAYOUT, phi::BilinearKernel, float, double) {}
......@@ -21,15 +21,15 @@
namespace phi {
template <typename T, typename Context>
void BilinearTensorProductGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& weight,
const DenseTensor& dout,
DenseTensor* dx,
DenseTensor* dy,
DenseTensor* dweight,
DenseTensor* dbias) {
void BilinearGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& weight,
const DenseTensor& dout,
DenseTensor* dx,
DenseTensor* dy,
DenseTensor* dweight,
DenseTensor* dbias) {
auto batch_size = x.dims()[0];
auto weight_dims = weight.dims();
int out_dim = weight_dims[0];
......
......@@ -22,12 +22,12 @@
namespace phi {
template <typename T, typename Context>
void BilinearTensorProductKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& weight,
const paddle::optional<DenseTensor>& bias,
DenseTensor* out) {
void BilinearKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& weight,
const paddle::optional<DenseTensor>& bias,
DenseTensor* out) {
ctx.template Alloc<T>(out);
auto y_mat = EigenMatrix<T>::From(y);
......
......@@ -18,13 +18,12 @@ namespace phi {
// Maps the legacy fluid op "bilinear_tensor_product" onto the renamed phi
// kernel "bilinear": inputs X, Y, Weight, Bias -> output Out, no attributes.
// The scraped diff left TWO consecutive return statements here; the first
// still returned the old kernel name, making the renamed mapping unreachable
// dead code — only the renamed return is kept.
KernelSignature BilinearTensorProductOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("bilinear", {"X", "Y", "Weight", "Bias"}, {}, {"Out"});
}
KernelSignature BilinearTensorProductGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bilinear_tensor_product_grad",
return KernelSignature("bilinear_grad",
{"X", "Y", "Weight", "Out@GRAD"},
{},
{"X@GRAD", "Y@GRAD", "Weight@GRAD", "Bias@GRAD"});
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.