Unverified commit e15ef948, authored by wangshengxiang, committed via GitHub

[XPU] bind op: atan & deformable_conv_v1 (#50373)

Parent commit: 14a92c8c
......@@ -48,6 +48,9 @@ XPUOpMap& get_kl2_ops() {
phi::DataType::INT64,
phi::DataType::BOOL})},
{"assign_value", XPUKernelSet({phi::DataType::FLOAT32})},
{"atan", XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
{"atan_grad",
XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
{"batch_norm_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"batch_norm", XPUKernelSet({phi::DataType::FLOAT32})},
{"bmm", XPUKernelSet({phi::DataType::FLOAT32})},
......@@ -132,6 +135,8 @@ XPUOpMap& get_kl2_ops() {
phi::DataType::INT64})},
{"deformable_conv_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"deformable_conv", XPUKernelSet({phi::DataType::FLOAT32})},
{"deformable_conv_v1_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"deformable_conv_v1", XPUKernelSet({phi::DataType::FLOAT32})},
{"depthwise_conv2d_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"depthwise_conv2d", XPUKernelSet({phi::DataType::FLOAT32})},
{"diag_v2",
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/activation_grad_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {

// Backward kernel for atan on XPU.
// Computes the input gradient dx from the forward input x and the output
// gradient dout by delegating to xpu::arctan_grad (presumably
// dx = dout / (1 + x^2), the analytic derivative of arctan — per XDNN docs).
template <typename T, typename Context>
void AtanGradKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& dout,
                    DenseTensor* dx) {
  using XPUType = typename XPUTypeTrait<T>::Type;
  // Allocate the gradient output buffer on the current device first.
  T* dx_data = dev_ctx.template Alloc<T>(dx);
  const auto* x_data = reinterpret_cast<const XPUType*>(x.data<T>());
  const auto* dout_data = reinterpret_cast<const XPUType*>(dout.data<T>());
  const int ret = xpu::arctan_grad(dev_ctx.x_context(),
                                   x_data,
                                   dout_data,
                                   reinterpret_cast<XPUType*>(dx_data),
                                   x.numel());
  // Surface any XDNN error code as a Paddle exception.
  PADDLE_ENFORCE_XDNN_SUCCESS(ret, "arctan_grad");
}

}  // namespace phi
// Register the atan backward kernel for the XPU backend with float32 and
// float16 dtypes (matching the "atan_grad" entry in the KL2 op-support map).
PD_REGISTER_KERNEL(atan_grad,
XPU,
ALL_LAYOUT,
phi::AtanGradKernel,
float,
phi::dtype::float16) {}
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/activation_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {

// Forward kernel for atan on XPU: computes the element-wise arctangent of x
// into out by delegating to xpu::arctan.
template <typename T, typename Context>
void AtanKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {
  using XPUType = typename XPUTypeTrait<T>::Type;
  // Allocate the output tensor before launching the device kernel.
  T* out_data = dev_ctx.template Alloc<T>(out);
  const auto* x_data = reinterpret_cast<const XPUType*>(x.data<T>());
  const int ret = xpu::arctan(dev_ctx.x_context(),
                              x_data,
                              reinterpret_cast<XPUType*>(out_data),
                              x.numel());
  // Surface any XDNN error code as a Paddle exception.
  PADDLE_ENFORCE_XDNN_SUCCESS(ret, "arctan");
}

}  // namespace phi
// Register the atan forward kernel for the XPU backend with float32 and
// float16 dtypes (matching the "atan" entry in the KL2 op-support map).
PD_REGISTER_KERNEL(
atan, XPU, ALL_LAYOUT, phi::AtanKernel, float, phi::dtype::float16) {}
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
import paddle
paddle.enable_static()
sys.path.append("..")
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
class XPUTestAtanOp(XPUOpTestWrapper):
    """Test wrapper for the XPU atan op; one concrete test class is
    generated per supported dtype by ``create_test_class``."""

    def __init__(self):
        self.op_name = "atan"
        self.use_dynamic_create_class = False

    class TestAtanOp(XPUOpTest):
        """Base case: checks atan forward output and X->Out gradient."""

        def setUp(self):
            self.set_xpu()
            self.op_type = "atan"
            self.eager_mode = True
            # override
            self.init_input_shape()
            sample = np.random.random(self.x_shape).astype(self.in_type)
            expected = np.arctan(sample)
            self.inputs = {'X': sample}
            self.outputs = {'Out': expected}

        def init_input_shape(self):
            # Default input shape; edge-case subclasses override this.
            self.x_shape = [15, 6]

        def set_xpu(self):
            self.__class__.no_need_check_grad = False
            self.place = paddle.XPUPlace(0)

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(
                self.place, ['X'], 'Out', check_eager=self.eager_mode
            )

    class Test1x1(TestAtanOp):
        """Edge case: smallest 2-D input."""

        def init_input_shape(self):
            self.x_shape = [1, 1]

    class Test1(TestAtanOp):
        """Edge case: single-element 1-D input."""

        def init_input_shape(self):
            self.x_shape = [1]
# Instantiate one concrete unittest class per dtype the XPU "atan" kernel
# reports as supported.
support_types = get_xpu_op_support_types("atan")
for stype in support_types:
    create_test_class(globals(), XPUTestAtanOp, stype)

if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.