Commit 05ad1583 authored by Kexin Zhao

initial commit

Parent c0421379
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <thrust/random.h>
 #include <thrust/transform.h>
 #include "paddle/fluid/operators/dropout_op.h"
+#include "paddle/fluid/platform/float16.h"

 namespace paddle {
 namespace operators {
@@ -51,7 +52,7 @@ class GPUDropoutKernel : public framework::OpKernel<T> {
     auto* x = context.Input<Tensor>("X");
     auto* y = context.Output<Tensor>("Out");
     y->mutable_data<T>(context.GetPlace());
     AttrType dropout_prob = context.Attr<AttrType>("dropout_prob");

     auto X = EigenMatrix<T>::Reshape(*x, 1);
     auto Y = EigenMatrix<T>::Reshape(*y, 1);
@@ -74,7 +75,7 @@ class GPUDropoutKernel : public framework::OpKernel<T> {
             context.cuda_device_context().stream()>>>(
         size, seed, dropout_prob, x_data, mask_data, y_data);
     } else {
-      Y.device(place) = X * (1.0f - dropout_prob);
+      Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
     }
   }
 };
@@ -83,9 +84,9 @@ class GPUDropoutKernel : public framework::OpKernel<T> {
 }  // namespace paddle

 namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 REGISTER_OP_CUDA_KERNEL(
-    dropout,
-    ops::GPUDropoutKernel<paddle::platform::CUDADeviceContext, float, float>);
-REGISTER_OP_CUDA_KERNEL(
-    dropout_grad,
-    ops::DropoutGradKernel<paddle::platform::CUDADeviceContext, float>);
+    dropout, ops::GPUDropoutKernel<plat::CUDADeviceContext, float, float>,
+    ops::GPUDropoutKernel<plat::CUDADeviceContext, plat::float16, float>);
+REGISTER_OP_CUDA_KERNEL(dropout_grad,
+                        ops::DropoutGradKernel<plat::CUDADeviceContext, float>);
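The functional change above is twofold: the else (inference) branch now casts the scale factor into the kernel's element type T, so the Eigen expression also compiles when T is plat::float16, and a float16 instantiation of GPUDropoutKernel is registered alongside the float one. As a rough illustration of what the two branches compute, here is a minimal numpy sketch (not Paddle code; dropout_forward is a made-up name):

import numpy as np

def dropout_forward(x, dropout_prob, is_test, seed=0):
    """Hypothetical stand-in for the two branches of GPUDropoutKernel."""
    if is_test:
        # Inference branch: downscale by (1 - p), keeping the multiply in
        # the input dtype -- the numpy analogue of
        # X * static_cast<T>(1.0f - dropout_prob).
        return x * x.dtype.type(1.0 - dropout_prob), None
    # Training branch: zero each element with probability p. The CUDA
    # kernel draws its mask per element with thrust's RNG; the seeded
    # numpy RNG here is only an analogue.
    rng = np.random.RandomState(seed)
    mask = (rng.uniform(size=x.shape) >= dropout_prob).astype(x.dtype)
    return x * mask, mask

x = np.random.random((32, 64)).astype("float16")
y, _ = dropout_forward(x, 0.35, is_test=True)
assert y.dtype == np.float16  # no silent promotion to float32

Note that this version of the operator uses downscale-at-inference semantics: the training path multiplies by a 0/1 mask without rescaling by 1/(1-p), which is why the tests below expect Out = X * (1 - dropout_prob) under is_test=True.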
@@ -82,5 +82,31 @@ class TestDropoutOp5(OpTest):
         self.check_output()


+class TestFP16DropoutOp1(OpTest):
+    def setUp(self):
+        x = np.random.random((32, 64)).astype("float16")
+        self.op_type = "dropout"
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True}
+        self.outputs = {'Out': x * (1.0 - self.attrs['dropout_prob'])}
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda() and core.op_support_gpu("dropout"):
+            self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)
+
+
+class TestFP16DropoutOp2(OpTest):
+    def setUp(self):
+        x = np.random.random((32, 64, 3)).astype("float16")
+        self.op_type = "dropout"
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {'dropout_prob': 0.75, 'is_test': True}
+        self.outputs = {'Out': x * (1.0 - self.attrs['dropout_prob'])}
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda() and core.op_support_gpu("dropout"):
+            self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)
+
+
 if __name__ == '__main__':
     unittest.main()
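The loose atol=1e-3 in both fp16 tests reflects float16 precision: with a 10-bit mantissa, machine epsilon is about 9.8e-4, so an elementwise product of order-1 values can differ from a float32 reference by roughly that much. A small sanity check of that bound (illustration only, not part of the test suite):

import numpy as np

x = np.random.random((32, 64)).astype("float16")
p = 0.35
ref = x.astype("float32") * (1.0 - p)   # float32 reference output
out = x * np.float16(1.0 - p)           # what the float16 kernel computes
err = np.abs(out.astype("float32") - ref).max()
print(err, err < 1e-3)                  # typically True for values in [0, 1)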