From 2d0ddf8c411df13f01de851f9b32f5b658a0a014 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Thu, 30 Aug 2018 11:39:03 +0800
Subject: [PATCH] refine cpu gru batch mode

---
 paddle/fluid/operators/fusion_gru_op.cc      | 290 +++++++++----------
 paddle/fluid/operators/math/sequence2batch.h |  10 +-
 2 files changed, 146 insertions(+), 154 deletions(-)

diff --git a/paddle/fluid/operators/fusion_gru_op.cc b/paddle/fluid/operators/fusion_gru_op.cc
index 3a34aa86b63..fcd551ed3b0 100644
--- a/paddle/fluid/operators/fusion_gru_op.cc
+++ b/paddle/fluid/operators/fusion_gru_op.cc
@@ -13,16 +13,13 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/fusion_gru_op.h"
+#include <cstring>  // for memcpy
 #include <string>
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/operators/math/blas.h"
-#include "paddle/fluid/operators/math/detail/activation_functions.h"
-#include "paddle/fluid/operators/math/detail/gru_cpu_kernel.h"
-#include "paddle/fluid/operators/math/detail/gru_kernel.h"
+#include "paddle/fluid/operators/math/cpu_vec.h"
 #include "paddle/fluid/operators/math/fc_compute.h"
-#include "paddle/fluid/operators/math/gru_compute.h"
-#include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/sequence2batch.h"
+#include "paddle/fluid/platform/cpu_info.h"
 
 namespace paddle {
 namespace operators {
@@ -35,12 +32,12 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
                  "Input(WeightH) of GRU should not be null.");
   PADDLE_ENFORCE(ctx->HasOutput("XX"), "Output(XX) of GRU should not be null.");
-  PADDLE_ENFORCE(ctx->HasOutput("BatchedGate"),
-                 "Output(BatchedGate) of GRU should not be null.");
-  PADDLE_ENFORCE(ctx->HasOutput("BatchResetHiddenPrev"),
-                 "Output(BatchResetHiddenPrev) of GRU should not be null.");
-  PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"),
-                 "Output(BatchedHidden) of GRU should not be null.");
+  PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
+                 "Output(ReorderedH0) of GRU should not be null.");
+  PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
+                 "Output(BatchedInput) of GRU should not be null.");
+  PADDLE_ENFORCE(ctx->HasOutput("BatchedOut"),
+                 "Output(BatchedOut) of GRU should not be null.");
   PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
                  "Output(Hidden) of GRU should not be null.");
 
@@ -83,9 +80,8 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
   }
   framework::DDim out_dims({x_dims[0], frame_size});
   ctx->SetOutputDim("Hidden", out_dims);
-  ctx->SetOutputDim("BatchedGate", {x_dims[0], wx_dims[1]});
-  ctx->SetOutputDim("BatchedHidden", out_dims);
-  ctx->SetOutputDim("BatchResetHiddenPrev", out_dims);
+  ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
+  ctx->SetOutputDim("BatchedOut", out_dims);
   ctx->ShareLoD("X", "Hidden");
 
   int xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
@@ -115,22 +111,26 @@ void FusionGRUOpMaker::Make() {
            "(Tensor) The FC weight with shape (M x 3D),"
            "where M is the dim size of x, D is the hidden size. ");
   AddInput("WeightH",
-           "(Tensor) (D x 3D) Same as GRUOp, where D is the hidden size. ");
+           "(Tensor) (D x 3D) Same as GRUOp, where D is the hidden size. "
+           "This weight is not exactly D x 3D as {W_update, W_reset, W_state}. "
+           "Actually it is composed of two parts of sizes D x 2D and D x D, "
+           "i.e. {W_update, W_reset; W_state} with shapes "
+           "{D x (D + D); D x D}.");
   AddInput("Bias",
            "(Tensor, optional) (1 x 3D)."
            "Almost same as GRUOp."
"Note: if have FC bias it should be added on this bias.") .AsDispensable(); + AddOutput("ReorderedH0", "(Tensor) (N x D), which N is the min-batch size.") + .AsIntermediate(); AddOutput("XX", - "(LoDTensor) the result after X * WeightX (size is T x 4D)" + "(LoDTensor) the result after X * WeightX (size is T x 3D)" " or batched_X (size is T x M), this will be automatically chosen," " where T is the total time steps in this mini-batch," " D is the hidden size, M is the dim size of x input.") .AsIntermediate(); - AddOutput("BatchedGate", "(LoDTensor) Same as GRUOp").AsIntermediate(); - AddOutput("BatchResetHiddenPrev", "(LoDTensor) (T x 3D) Same as GRUOp.") - .AsIntermediate(); - AddOutput("BatchedHidden", "(LoDTensor) (T X D) Same as GRUOp.") + AddOutput("BatchedInput", "(LoDTensor) (T x 3D)").AsIntermediate(); + AddOutput("BatchedOut", "(LoDTensor) (T X D) save batched hidden.") .AsIntermediate(); AddOutput("Hidden", "(LoDTensor) (T x D) Same as GRUOp"); AddAttr("activation", @@ -153,45 +153,53 @@ more details can refer to GRU op. )DOC"); } -template -inline void ReorderInitState(const DeviceContext& ctx, - const framework::Tensor& src, - framework::Vector index_lod, - framework::Tensor* dst, bool indexed_src) { - math::CopyMatrixRowsFunctor row_shuffle; - dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index_lod, dst, indexed_src); -} - -template +template class FusionGRUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { + using DeviceContext = paddle::platform::CPUDeviceContext; auto* x = ctx.Input("X"); auto* wx = ctx.Input("WeightX"); auto* wh = ctx.Input("WeightH"); auto* bias = ctx.Input("Bias"); auto* h0 = ctx.Input("H0"); + auto* reordered_h0 = ctx.Output("ReorderedH0"); auto* xx = ctx.Output("XX"); - auto* batched_gate = ctx.Output("BatchedGate"); - auto* batch_reset_hidden_prev = - ctx.Output("BatchResetHiddenPrev"); - auto* batch_hidden = ctx.Output("BatchedHidden"); + auto* batched_input = ctx.Output("BatchedInput"); + auto* batched_out = ctx.Output("BatchedOut"); auto* hidden_out = ctx.Output("Hidden"); - bool is_reverse = ctx.Attr("is_reverse"); - T* xx_data = xx->mutable_data(ctx.GetPlace()); - T* batched_gate_data = batched_gate->mutable_data(ctx.GetPlace()); - batch_reset_hidden_prev->mutable_data(ctx.GetPlace()); - batch_hidden->mutable_data(ctx.GetPlace()); - hidden_out->mutable_data(ctx.GetPlace()); + bool is_reverse = ctx.Attr("is_reverse"); + std::function act_gate, act_state; + std::function bias_sub; + auto& act_gate_str = ctx.Attr("gate_activation"); + auto& act_state_str = ctx.Attr("activation"); + if (platform::jit::MayIUse(platform::jit::avx)) { + math::VecActivations act_functor; + act_gate = act_functor(act_gate_str); + act_state = act_functor(act_state_str); + bias_sub = math::vec_bias_sub; + } else { + math::VecActivations act_functor; + act_gate = act_functor(act_gate_str); + act_state = act_functor(act_state_str); + bias_sub = math::vec_bias_sub; + } const T* x_data = x->data(); const T* wx_data = wx->data(); const T* wh_data = wh->data(); + T* xx_data = xx->mutable_data(ctx.GetPlace()); + T* batched_input_data = batched_input->mutable_data(ctx.GetPlace()); + T* batched_out_data = batched_out->mutable_data(ctx.GetPlace()); + hidden_out->mutable_data(ctx.GetPlace()); + auto x_dims = x->dims(); auto wx_dims = wx->dims(); + const int D3 = wx_dims[1]; + const int D = D3 / 3; + const int D2 = D * 2; auto& dev_ctx = ctx.template device_context(); auto blas = 
     math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
@@ -199,125 +207,110 @@ class FusionGRUKernel : public framework::OpKernel<T> {
       math::FCCompute<DeviceContext, T>(blas, x_dims[0], wx_dims[1], x_dims[1],
                                         x_data, wx_data, xx_data,
                                         bias ? bias->data<T>() : NULL);
-      to_batch(dev_ctx, *xx, batched_gate, true, is_reverse);
+      to_batch(dev_ctx, *xx, batched_input, true, is_reverse);
     } else {
       to_batch(dev_ctx, *x, xx, true, is_reverse);
-      batched_gate->set_lod(xx->lod());
+      batched_input->set_lod(xx->lod());
       math::FCCompute<DeviceContext, T>(blas, x_dims[0], wx_dims[1], x_dims[1],
-                                        xx_data, wx_data, batched_gate_data,
+                                        xx_data, wx_data, batched_input_data,
                                         bias ? bias->data<T>() : NULL);
     }
-    int frame_size = static_cast<int>(wx_dims[1] / 3);
-    math::GRUMetaValue<T> gru_value;
-    gru_value.gate_weight = const_cast<T*>(wh_data);
-    gru_value.state_weight =
-        const_cast<T*>(wh_data + 2 * frame_size * frame_size);
-    Tensor ordered_h0;
-
-    framework::Vector<size_t> order(batched_gate->lod()[2]);
+    auto batched_lod = batched_input->lod();
+    const auto& seq_order = batched_lod[2];
+    const int max_bs = seq_order.size();
+    reordered_h0->Resize({max_bs, D});
+    int tstart = 0;
+    T* prev_hidden_data = NULL;
     if (h0) {
-      ReorderInitState<DeviceContext, T>(
-          ctx.template device_context<DeviceContext>(), *h0, order, &ordered_h0,
-          true);
-      gru_value.prev_out_value = ordered_h0.data<T>();
+      // reorder h0
+      T* reordered_h0_data = reordered_h0->mutable_data<T>(ctx.GetPlace());
+      const T* h0_data = h0->data<T>();
+      prev_hidden_data = reordered_h0_data;
+      size_t sz = sizeof(T) * D;
+      for (int i = 0; i < max_bs; ++i) {
+        std::memcpy(reordered_h0_data, h0_data + seq_order[i] * D, sz);
+        reordered_h0_data += D;
+      }
     } else {
-      gru_value.prev_out_value = nullptr;
+      // compute without h0
+      T* cur_in_data = batched_input_data;
+      T* cur_out_data = batched_out_data;
+      // W: {W_update, W_reset; W_state}
+      for (int i = 0; i < max_bs; ++i) {
+        // update gate
+        act_gate(D, cur_in_data, cur_in_data);
+        // state gate
+        act_state(D, cur_in_data + D2, cur_in_data + D2);
+        // out = zt * ht~ (the (1 - zt) * ht_1 term is zero since there is no h0)
+        blas.VMUL(D, cur_in_data, cur_in_data + D2, cur_out_data);
+        // move to the next sequence of this batch step
+        cur_in_data += D3;
+        cur_out_data += D;
+      }
+      tstart = 1;
+      prev_hidden_data = batched_out_data;
     }
-    auto batch_starts = batched_gate->lod()[0];
-    size_t seq_len = batch_starts.size() - 1;
-    auto active_node =
-        math::detail::GetActivationType(ctx.Attr<std::string>("activation"));
-    auto active_gate = math::detail::GetActivationType(
-        ctx.Attr<std::string>("gate_activation"));
-
-#ifdef PADDLE_WITH_MKLML
-    // use MKL packed to speedup GEMM
-    if (FLAGS_paddle_num_threads >= 4) {
-      auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
-      T* packed_gate = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
-                                       frame_size * 2 /*width of weight*/,
-                                       frame_size /*height of height*/);
-      PADDLE_ENFORCE(packed_gate);
-      blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size * 2,
-                     frame_size, T(1.0), gru_value.gate_weight, frame_size * 2,
-                     packed_gate);
-      T* packed_state = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
-                                        frame_size /*width of weight*/,
-                                        frame_size /*height of height*/);
-      PADDLE_ENFORCE(packed_state);
-      blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size,
-                     frame_size, T(1.0), gru_value.state_weight, frame_size,
-                     packed_state);
-      for (size_t n = 0; n < seq_len; n++) {
-        int bstart = static_cast<int>(batch_starts[n]);
-        int bend = static_cast<int>(batch_starts[n + 1]);
-        int cur_batch_size = bend - bstart;
-
-        Tensor gate_t = batched_gate->Slice(bstart, bend);
-        Tensor reset_hidden_prev_t =
-            batch_reset_hidden_prev->Slice(bstart, bend);
-        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
-        gru_value.output_value = hidden_t.data<T>();
-        gru_value.gate_value = gate_t.data<T>();
-        gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-
-        if (gru_value.prev_out_value) {
-          blas.GEMM_COMPUTE(
-              CblasNoTrans, CblasPacked, cur_batch_size, frame_size * 2,
-              frame_size, gru_value.prev_out_value, frame_size, packed_gate,
-              frame_size * 2, T(1), gru_value.gate_value, frame_size * 3);
-        }
-
-        math::detail::forward_reset_output(
-            math::detail::forward::gru_resetOutput<T>(), gru_value, frame_size,
-            cur_batch_size, active_gate);
-
-        if (gru_value.prev_out_value) {
-          blas.GEMM_COMPUTE(
-              CblasNoTrans, CblasPacked, cur_batch_size, frame_size, frame_size,
-              gru_value.reset_output_value, frame_size, packed_state,
-              frame_size, T(1), gru_value.gate_value + frame_size * 2,
-              frame_size * 3);
-        }
-
-        math::detail::forward_final_output(
-            math::detail::forward::gru_finalOutput<T>(), gru_value, frame_size,
-            cur_batch_size, active_node);
-
-        gru_value.prev_out_value = gru_value.output_value;
+    // Then start from the next time step
+    const T* wh_state_data = wh_data + D * D2;
+    const auto& batch_starts = batched_lod[0];
+    const int max_seq_len = batch_starts.size() - 1;
+    batched_input_data = batched_input_data + tstart * max_bs * D3;
+    batched_out_data = batched_out_data + tstart * max_bs * D;
+    for (int step = tstart; step < max_seq_len; ++step) {
+      const int cur_bs = batch_starts[step + 1] - batch_starts[step];
+      // gemm: prev_hidden * {W_update, W_reset}
+      blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D2, D, static_cast<T>(1),
+                prev_hidden_data, D, wh_data, D2, static_cast<T>(1),
+                batched_input_data, D3);
+
+      T* cur_batched_data = batched_input_data;
+      T* cur_prev_hidden_data = prev_hidden_data;
+      for (int i = 0; i < cur_bs; ++i) {
+        act_gate(D2, cur_batched_data, cur_batched_data);
+        // rt = rt*ht_1 inplace result
+        // TODO(TJ): try to save to cur out data
+        // maybe get benefits from avoiding cache misses in the next gemm
+        blas.VMUL(D, cur_prev_hidden_data, cur_batched_data + D,
+                  cur_batched_data + D);
+
+        cur_batched_data += D3;
+        cur_prev_hidden_data += D;
       }
-      blas.GEMM_FREE(packed_gate);
-      blas.GEMM_FREE(packed_state);
-    } else {
-#endif
-      for (size_t n = 0; n < seq_len; n++) {
-        int bstart = static_cast<int>(batch_starts[n]);
-        int bend = static_cast<int>(batch_starts[n + 1]);
-        int cur_batch_size = bend - bstart;
-
-        Tensor gate_t = batched_gate->Slice(bstart, bend);
-        Tensor reset_hidden_prev_t =
-            batch_reset_hidden_prev->Slice(bstart, bend);
-        Tensor hidden_t = batch_hidden->Slice(bstart, bend);
-        gru_value.output_value = hidden_t.data<T>();
-        gru_value.gate_value = gate_t.data<T>();
-        gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-
-        math::GRUUnitFunctor<DeviceContext, T>::compute(
-            dev_ctx, gru_value, frame_size, cur_batch_size, active_node,
-            active_gate);
-
-        gru_value.prev_out_value = gru_value.output_value;
+      cur_batched_data = batched_input_data;
+      blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D, D, static_cast<T>(1),
+                cur_batched_data + D, D3, wh_state_data, D, static_cast<T>(1),
+                cur_batched_data + D2, D3);
+
+      T* cur_out_data = batched_out_data;
+      cur_prev_hidden_data = prev_hidden_data;
+      for (int i = 0; i < cur_bs; ++i) {
+        // ht~ = act_state(...)
+        act_state(D, cur_batched_data + D2, cur_batched_data + D2);
+        // ht~~ = zt*ht~ inplace result
+        blas.VMUL(D, cur_batched_data, cur_batched_data + D2,
+                  cur_batched_data + D2);
+        // zt = 1 - zt inplace result
+        bias_sub(D, static_cast<T>(1), cur_batched_data, cur_batched_data);
+        // zt = ht_1 * zt
+        blas.VMUL(D, cur_prev_hidden_data, cur_batched_data, cur_batched_data);
+        // out = zt + ht~~
+        blas.VADD(D, cur_batched_data, cur_batched_data + D2, cur_out_data);
+
+        cur_batched_data += D3;
+        cur_prev_hidden_data += D;
+        cur_out_data += D;
       }
-#ifdef PADDLE_WITH_MKLML
+      prev_hidden_data = batched_out_data;
+      batched_out_data = cur_out_data;
+      batched_input_data = cur_batched_data;
     }
-#endif
+
     math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
-    batch_hidden->set_lod(batched_gate->lod());
-    to_seq(dev_ctx, *batch_hidden, hidden_out);
+    batched_out->set_lod(batched_lod);
+    to_seq(dev_ctx, *batched_out, hidden_out);
   }
 };
@@ -327,6 +320,5 @@ class FusionGRUKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker,
                   paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OP_CPU_KERNEL(
-    fusion_gru, ops::FusionGRUKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::FusionGRUKernel<paddle::platform::CPUDeviceContext, double>);
+REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
+                       ops::FusionGRUKernel<double>);
diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h
index 07372235a7c..a3186f82d0c 100644
--- a/paddle/fluid/operators/math/sequence2batch.h
+++ b/paddle/fluid/operators/math/sequence2batch.h
@@ -92,7 +92,7 @@ class LoDTensor2BatchFunctor {
     // Calculate the start position of each batch.
     // example:  sequences = {s0, s1, s2}
     //           s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
-    //           num_batch = 5,
+    //           max_seqlen = 5,
    //           batchIndex = {b0, b1, b2, b3, b4}
     //           b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
     //           batch_start_positions[6] = {0, 3, 6, 9, 11, 12}
@@ -109,7 +109,7 @@ class LoDTensor2BatchFunctor {
     //    where 1 is the second sequence,
     //          0 is the first sequence,
     //          2 is the third sequence.
-    // The num_batch represents batch size after rearranging the
+    // The max_seqlen represents the number of batches after rearranging the
     // input LodTensor. It is also the maximum length of input sequence.
 
     paddle::framework::LoD batch_lods;
@@ -118,8 +118,8 @@ class LoDTensor2BatchFunctor {
     batch_lods.emplace_back(std::vector<size_t>{0});
 
     // batch_lods[0] is the start positions for batch LoDTensor
-    int num_batch = seq_info[0].length;
-    batch_lods[0].resize(static_cast<size_t>(num_batch + 1));
+    int max_seqlen = seq_info[0].length;
+    batch_lods[0].resize(static_cast<size_t>(max_seqlen + 1));
     // batch_lods[1] is the raw index in the input LoDTensor
     batch_lods[1].resize(static_cast<size_t>(lod_tensor.dims()[0]));
     // batch_lods[2] is the sort order for the input LoDTensor.
@@ -128,7 +128,7 @@ class LoDTensor2BatchFunctor {
     size_t* batch_starts = batch_lods[0].data();
     size_t* seq2batch_idx = batch_lods[1].data();
     batch_starts[0] = 0;
-    for (int n = 0; n < num_batch; n++) {
+    for (int n = 0; n < max_seqlen; n++) {
       auto batch_id = static_cast<int>(batch_starts[n]);
       for (size_t i = 0; i < seq_info.size(); ++i) {
         int seq_len = seq_info[i].length;
-- 
GitLab
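For reference, a minimal standalone sketch (not Paddle code) of the per-time-step math the rewritten kernel performs for one sequence, assuming the default sigmoid gate activation and tanh state activation. Plain loops stand in for the blas.GEMM/VMUL/VADD calls and the act_gate/act_state/bias_sub functors, and every name below (gru_step, wh_ur, wh_c, ...) is illustrative rather than a Paddle API.

// Hypothetical sketch of one fused GRU step; mirrors the patch's math only.
#include <cmath>
#include <cstdio>
#include <vector>

static float sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); }

// gates: width 3*D, laid out [u | r | c], already holding x * Wx (+ bias).
// wh:    D x 3D weight packed as {W_update, W_reset} (D x 2D) then W_state (D x D).
void gru_step(int D, std::vector<float>* gates, const std::vector<float>& wh,
              const std::vector<float>& h_prev, std::vector<float>* h_out) {
  float* g = gates->data();
  const float* wh_ur = wh.data();              // D x 2D part: {W_update, W_reset}
  const float* wh_c = wh.data() + D * 2 * D;   // D x D part: W_state
  // g[0:2D] += h_prev * {W_update, W_reset}   (first GEMM in the patch)
  for (int j = 0; j < 2 * D; ++j)
    for (int k = 0; k < D; ++k) g[j] += h_prev[k] * wh_ur[k * 2 * D + j];
  // u = act_gate(g[0:D]), r = act_gate(g[D:2D]), then r = r * h_prev
  for (int j = 0; j < 2 * D; ++j) g[j] = sigmoid(g[j]);
  for (int j = 0; j < D; ++j) g[D + j] *= h_prev[j];
  // g[2D:3D] += (r * h_prev) * W_state        (second GEMM in the patch)
  for (int j = 0; j < D; ++j)
    for (int k = 0; k < D; ++k) g[2 * D + j] += g[D + k] * wh_c[k * D + j];
  // h = u * tanh(c) + (1 - u) * h_prev        (VMUL / bias_sub / VADD sequence)
  for (int j = 0; j < D; ++j) {
    float c = std::tanh(g[2 * D + j]);
    (*h_out)[j] = g[j] * c + (1.0f - g[j]) * h_prev[j];
  }
}

int main() {
  const int D = 2;
  std::vector<float> gates = {0.1f, -0.2f, 0.3f, 0.0f, 0.05f, -0.1f};  // x * Wx
  std::vector<float> wh(D * 3 * D, 0.01f);  // packed {W_update, W_reset; W_state}
  std::vector<float> h_prev = {0.5f, -0.5f}, h(D, 0.0f);
  gru_step(D, &gates, wh, h_prev, &h);
  std::printf("h = [%f, %f]\n", h[0], h[1]);
  return 0;
}

In the kernel itself the two matrix products are batched over all sequences still active at a given step (cur_bs rows at a time), which is where the gain over the old per-gate functor path comes from.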
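For the sequence2batch.h side, a small illustrative sketch (again not Paddle code; all names are made up) of the bookkeeping behind max_seqlen: sequences are sorted by length in descending order and one batch is formed per time step from every sequence that is still alive, reproducing the {s0, s1, s2} example in the comment.

// Hypothetical sketch of LoD-to-batch index bookkeeping (forward direction only).
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Lengths of s0, s1, s2 from the comment's example (s1 is the longest).
  std::vector<int> len = {4, 5, 3};
  std::vector<int> order(len.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(),
            [&](int a, int b) { return len[a] > len[b]; });  // -> {1, 0, 2}

  // Start offset of each sequence in the original (un-batched) tensor.
  std::vector<int> seq_start(len.size() + 1, 0);
  for (size_t i = 0; i < len.size(); ++i) seq_start[i + 1] = seq_start[i] + len[i];

  int max_seqlen = len[order[0]];              // number of time-step batches
  std::vector<int> batch_starts = {0};
  std::vector<int> seq2batch_idx;
  for (int n = 0; n < max_seqlen; ++n) {       // one pass per time step
    for (int seq : order) {
      if (n < len[seq]) seq2batch_idx.push_back(seq_start[seq] + n);
    }
    batch_starts.push_back(static_cast<int>(seq2batch_idx.size()));
  }
  // Prints: max_seqlen = 5 and batch_starts = 0 3 6 9 11 12,
  // matching batch_start_positions[6] in the comment's example.
  std::printf("max_seqlen = %d\nbatch_starts =", max_seqlen);
  for (int s : batch_starts) std::printf(" %d", s);
  std::printf("\n");
  return 0;
}

With this layout, batch_starts[step + 1] - batch_starts[step] is exactly the cur_bs that the fused kernel feeds to each GEMM above.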