From 05ad15832aba64097759f8b7f232beba58cabedb Mon Sep 17 00:00:00 2001
From: Kexin Zhao
Date: Mon, 19 Mar 2018 11:09:03 -0700
Subject: [PATCH] initial commit

---
 paddle/fluid/operators/dropout_op.cu          | 15 ++++++-----
 .../fluid/tests/unittests/test_dropout_op.py  | 26 +++++++++++++++++++
 2 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu
index d6f9c04359d..c949968a744 100644
--- a/paddle/fluid/operators/dropout_op.cu
+++ b/paddle/fluid/operators/dropout_op.cu
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <thrust/random.h>
 #include <thrust/transform.h>
 #include "paddle/fluid/operators/dropout_op.h"
+#include "paddle/fluid/platform/float16.h"
 
 namespace paddle {
 namespace operators {
@@ -51,7 +52,7 @@ class GPUDropoutKernel : public framework::OpKernel<T> {
     auto* x = context.Input<Tensor>("X");
     auto* y = context.Output<Tensor>("Out");
     y->mutable_data<T>(context.GetPlace());
-    AttrType dropout_prob = context.Attr<AttrType>("dropout_prob");
+    AttrType dropout_prob = context.Attr<float>("dropout_prob");
 
     auto X = EigenMatrix<T>::Reshape(*x, 1);
     auto Y = EigenMatrix<T>::Reshape(*y, 1);
@@ -74,7 +75,7 @@ class GPUDropoutKernel : public framework::OpKernel<T> {
                    context.cuda_device_context().stream()>>>(
         size, seed, dropout_prob, x_data, mask_data, y_data);
     } else {
-      Y.device(place) = X * (1.0f - dropout_prob);
+      Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
     }
   }
 };
@@ -83,9 +84,9 @@
 }  // namespace paddle
 
 namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 REGISTER_OP_CUDA_KERNEL(
-    dropout,
-    ops::GPUDropoutKernel<paddle::platform::CUDADeviceContext, float, float>);
-REGISTER_OP_CUDA_KERNEL(
-    dropout_grad,
-    ops::DropoutGradKernel<paddle::platform::CUDADeviceContext, float>);
+    dropout, ops::GPUDropoutKernel<plat::CUDADeviceContext, float, float>,
+    ops::GPUDropoutKernel<plat::CUDADeviceContext, plat::float16, float>);
+REGISTER_OP_CUDA_KERNEL(dropout_grad,
+                        ops::DropoutGradKernel<plat::CUDADeviceContext, float>);
diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py
index 60930a612c1..6fcd5ac1a66 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py
@@ -82,5 +82,31 @@ class TestDropoutOp5(OpTest):
         self.check_output()
 
 
+class TestFP16DropoutOp1(OpTest):
+    def setUp(self):
+        x = np.random.random((32, 64)).astype("float16")
+        self.op_type = "dropout"
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True}
+        self.outputs = {'Out': x * (1.0 - self.attrs['dropout_prob'])}
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda() and core.op_support_gpu("dropout"):
+            self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)
+
+
+class TestFP16DropoutOp2(OpTest):
+    def setUp(self):
+        x = np.random.random((32, 64, 3)).astype("float16")
+        self.op_type = "dropout"
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {'dropout_prob': 0.75, 'is_test': True}
+        self.outputs = {'Out': x * (1.0 - self.attrs['dropout_prob'])}
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda() and core.op_support_gpu("dropout"):
+            self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab