Unverified commit 608a3f28, authored by LoneRanger, committed via GitHub

【PaddlePaddle Hackathon 4】No.56 : add fp16 test and bf16 for poisson (#51662)

* add fp16 and bf16 support for poisson

* add fp16 and bf16 support for searchsorted

* fix bug

* Update test_searchsorted_op.py

fix function name

* Update test_poisson_op.py

fix function name

* fix bug

* remove the searchorted

* Update test_poisson_op.py

* fix bug of TestPoissonBF16Op

* Update test_poisson_op.py

* Update test_poisson_op.py

* Update test_poisson_op.py

* fix bug of import

* fix bug
Parent commit: b0ea1346
......@@ -15,5 +15,11 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/poisson_grad_kernel_impl.h"
// Register the poisson gradient GPU kernel. FP16 and BF16 were added so the
// backward pass matches the dtypes supported by the forward kernel.
// NOTE: the previous 2-dtype (float/double) registration must be removed —
// registering the same kernel name twice is a redefinition.
PD_REGISTER_KERNEL(poisson_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::PoissonGradKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
......@@ -64,5 +64,11 @@ void PoissonKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
} // namespace phi
// Register the poisson forward GPU kernel with FP16/BF16 support added.
// NOTE: the previous 2-dtype (float/double) registration must be removed —
// registering the same kernel name twice is a redefinition.
PD_REGISTER_KERNEL(poisson,
                   GPU,
                   ALL_LAYOUT,
                   phi::PoissonKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
......@@ -16,9 +16,14 @@ import math
import unittest
import numpy as np
from eager_op_test import OpTest
from eager_op_test import (
OpTest,
convert_float_to_uint16,
convert_uint16_to_float,
)
import paddle
from paddle.fluid import core
paddle.enable_static()
paddle.seed(100)
......@@ -42,17 +47,20 @@ class TestPoissonOp1(OpTest):
def setUp(self):
    """Prepare inputs/outputs for the poisson op test.

    Calls ``init_dtype`` *before* ``config`` so that dtype-overriding
    subclasses (FP16, etc.) take effect.
    """
    self.op_type = "poisson"
    self.python_api = paddle.tensor.poisson
    self.init_dtype()
    self.config()
    self.attrs = {}
    shape = [2048, 1024]
    # Input is a constant tensor filled with the Poisson rate `lam`;
    # the reference output is a placeholder (verify_output does the real check).
    self.inputs = {'X': np.full(shape, self.lam, dtype=self.dtype)}
    self.outputs = {'Out': np.ones(shape, dtype=self.dtype)}
def init_dtype(self):
    """Select the test dtype; subclasses override this for FP16/BF16 runs."""
    self.dtype = "float64"
def config(self):
    """Set the Poisson rate and the histogram-check interval.

    The dtype is deliberately NOT assigned here: it is owned by
    ``init_dtype`` so that subclasses overriding the dtype (e.g. the
    FP16 variant) are not clobbered — ``setUp`` calls ``init_dtype``
    first and ``config`` second.
    """
    # Rate parameter of the Poisson distribution used to fill X.
    self.lam = 10
    # [a, b]: value range whose empirical histogram is compared
    # against the analytic PMF in verify_output.
    self.a = 5
    self.b = 15
def verify_output(self, outs):
hist, prob = output_hist(np.array(outs[0]), self.lam, self.a, self.b)
......@@ -368,5 +376,56 @@ class TestPoissonAPI(unittest.TestCase):
paddle.enable_static()
class TestPoissonFP16OP(TestPoissonOp1):
    """FP16 variant of TestPoissonOp1 — only the dtype differs."""

    def init_dtype(self):
        # Override the base-class float64 with half precision.
        self.dtype = np.float16
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or the place does not support bfloat16",
)
class TestPoissonBF16Op(OpTest):
    """Test the poisson op with bfloat16 inputs on CUDA.

    BF16 tensors are carried as uint16 bit patterns, so inputs/outputs are
    built in fp32 and converted with convert_float_to_uint16; the output is
    converted back before the histogram check.
    """

    def setUp(self):
        self.op_type = "poisson"
        self.python_api = paddle.tensor.poisson
        # NOTE(review): presumably required so the OpTest machinery sees the
        # op type on the class when setUp-time attrs aren't enough — confirm.
        self.__class__.op_type = self.op_type
        self.config()
        # Build fp32 data, then reinterpret as bfloat16 (uint16 bit pattern).
        x = np.full([2048, 1024], self.lam, dtype="float32")
        out = np.ones([2048, 1024], dtype="float32")
        self.attrs = {}
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def config(self):
        # Rate parameter and the [a, b] histogram-check interval.
        self.lam = 10
        self.a = 5
        self.b = 15
        # uint16 is the carrier dtype for bfloat16 in OpTest.
        self.dtype = np.uint16

    def verify_output(self, outs):
        # Compare the empirical histogram of the sampled output against the
        # analytic Poisson PMF on [a, b], after converting back to float.
        hist, prob = output_hist(
            convert_uint16_to_float(np.array(outs[0])), self.lam, self.a, self.b
        )
        np.testing.assert_allclose(hist, prob, rtol=0.01)

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place_customized(self.verify_output, place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        # poisson has no meaningful gradient w.r.t. X: expected grads are zero.
        self.check_grad_with_place(
            place,
            ['X'],
            'Out',
            user_defined_grads=[np.zeros([2048, 1024], dtype="float32")],
            user_defined_grad_outputs=[
                np.random.rand(2048, 1024).astype("float32")
            ],
        )
if __name__ == "__main__":
    # Run the full poisson test suite when this file is executed directly.
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册