From e8405e5c6189255139d09edf5c1e36a6bf82a1af Mon Sep 17 00:00:00 2001
From: GaoWei8 <53294385+GaoWei8@users.noreply.github.com>
Date: Fri, 30 Aug 2019 14:42:09 +0800
Subject: [PATCH] Modify the dropout op to multi-thread (#19504)

* Modify the dropout op to multi-thread
test=develop

* define parallel
test=develop
---
 paddle/fluid/operators/dropout_op.h | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h
index 09c4899c73..20742f9a45 100644
--- a/paddle/fluid/operators/dropout_op.h
+++ b/paddle/fluid/operators/dropout_op.h
@@ -77,13 +77,20 @@ class CPUDropoutKernel : public framework::OpKernel<T> {
         }
       }
     } else {
-      auto X = EigenMatrix<T>::Reshape(*x, 1);
-      auto Y = EigenMatrix<T>::Reshape(*y, 1);
-      auto& place =
-          *context.template device_context<DeviceContext>().eigen_device();
       if (upscale_in_train) {
-        Y.device(place) = X;
+        const auto* X_data = x->data<T>();
+        auto* Y_data = y->mutable_data<T>(context.GetPlace());
+#ifdef PADDLE_WITH_MKLML
+#pragma omp parallel for
+#endif
+        for (int i = 0; i < x->numel(); i++) {
+          Y_data[i] = X_data[i];
+        }
       } else {
+        auto X = EigenMatrix<T>::Reshape(*x, 1);
+        auto Y = EigenMatrix<T>::Reshape(*y, 1);
+        auto& place =
+            *context.template device_context<DeviceContext>().eigen_device();
         Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
       }
     }
--
GitLab
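
For context, the hunk above replaces the Eigen identity assignment on the upscale_in_train inference path with a raw element-wise copy so it can be parallelized with an OpenMP "parallel for". Below is a minimal standalone sketch of that pattern, not Paddle code: ParallelCopy is a hypothetical helper introduced only for illustration, and it uses the standard _OPENMP macro instead of the PADDLE_WITH_MKLML guard used in the patch (Paddle's MKLML build is where OpenMP is known to be linked in). Without OpenMP enabled at compile time the loop simply runs single-threaded.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical helper (not a Paddle API): element-wise copy parallelized with
// OpenMP, mirroring the copy loop the patch adds for the inference path.
template <typename T>
void ParallelCopy(const T* src, T* dst, std::int64_t n) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (std::int64_t i = 0; i < n; i++) {
    dst[i] = src[i];
  }
}

int main() {
  std::vector<float> x(1 << 20, 1.5f);
  std::vector<float> y(x.size());
  ParallelCopy(x.data(), y.data(), static_cast<std::int64_t>(x.size()));
  std::printf("y[0] = %f\n", y[0]);  // expected: 1.500000
  return 0;
}

Build with something like "g++ -O2 -fopenmp sketch.cpp" to get the threaded version; dropping -fopenmp leaves behavior identical but serial, which is the same effect the PADDLE_WITH_MKLML guard has in the kernel.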