From 2669aea67fffa9a0db1616f8f15a9c6b05be4352 Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Tue, 3 Apr 2018 13:07:44 +0800
Subject: [PATCH] sgd_op support optimize SelectedRows

---
 paddle/fluid/framework/selected_rows.cc |  12 ++-
 paddle/fluid/framework/selected_rows.h  |  13 ++-
 paddle/fluid/operators/sgd_op.cc        |  24 +++--
 paddle/fluid/operators/sgd_op.h         | 123 +++++++++++++++--------
 4 files changed, 121 insertions(+), 51 deletions(-)

diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc
index 504344e93..162b0355b 100644
--- a/paddle/fluid/framework/selected_rows.cc
+++ b/paddle/fluid/framework/selected_rows.cc
@@ -1,8 +1,11 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
+
 http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -13,6 +16,13 @@ limitations under the License. */
 
 namespace paddle {
 namespace framework {
+
+size_t GetIndex(const std::vector<int64_t>& rows, int64_t value) {
+  auto it = std::find(rows.begin(), rows.end(), value);
+  PADDLE_ENFORCE(it != rows.end(), "id should be in rows");
+  return static_cast<size_t>(std::distance(rows.begin(), it));
+}
+
 void SerializeToStream(std::ostream& os, const SelectedRows& selected_rows,
                        const platform::DeviceContext& dev_ctx) {
   {  // the 1st field, uint32_t version
diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h
index c9c2c1bb7..e461b7bdc 100644
--- a/paddle/fluid/framework/selected_rows.h
+++ b/paddle/fluid/framework/selected_rows.h
@@ -1,8 +1,11 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
+
 http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -10,6 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+
+#include <vector>
+
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
 
@@ -59,6 +65,11 @@ class SelectedRows {
   int64_t height_;
 };
 
+/**
+ * Find the index of value in rows.
+ */
+size_t GetIndex(const std::vector<int64_t>& rows, int64_t value);
+
 /*
  * Serialize/Desiralize SelectedRows to std::ostream
  * You can pass ofstream or ostringstream to serilize to file
diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc
index d0aa2f9cb..9cdc5b3f1 100644
--- a/paddle/fluid/operators/sgd_op.cc
+++ b/paddle/fluid/operators/sgd_op.cc
@@ -43,9 +43,19 @@ class SGDOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<framework::Tensor>("Param")->type()),
-        ctx.GetPlace());
+    auto* table_var = ctx.InputVar("Param");
+    if (table_var->IsType<framework::LoDTensor>()) {
+      return framework::OpKernelType(
+          framework::ToDataType(table_var->Get<framework::LoDTensor>().type()),
+          ctx.device_context());
+    } else if (table_var->IsType<framework::SelectedRows>()) {
+      return framework::OpKernelType(
+          framework::ToDataType(
+              table_var->Get<framework::SelectedRows>().value().type()),
+          ctx.device_context());
+    } else {
+      PADDLE_THROW("Param should be LoDTensor or SelectedRows");
+    }
   }
 };
 
@@ -53,10 +63,12 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Param", "(Tensor) Input parameter");
+    AddInput("Param", "(Tensor or SelectedRows) Input parameter");
     AddInput("LearningRate", "(Tensor) Learning rate of SGD");
-    AddInput("Grad", "(Tensor) Input gradient");
-    AddOutput("ParamOut", "(Tensor) Output parameter");
+    AddInput("Grad", "(Tensor or SelectedRows) Input gradient");
+    AddOutput("ParamOut",
+              "(Tensor or SelectedRows, same with Param) "
+              "Output parameter, should share the same memory with Param");
     AddComment(R"DOC(
 
 SGD operator
diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h
index 0ad801079..237cd2f81 100644
--- a/paddle/fluid/operators/sgd_op.h
+++ b/paddle/fluid/operators/sgd_op.h
@@ -23,60 +23,97 @@ namespace operators {
 
 template <typename T>
 class SGDOpKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* param = ctx.Input<framework::Tensor>("Param");
-    auto* param_out = ctx.Output<framework::Tensor>("ParamOut");
-    auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate");
-
-    auto* grad_var = ctx.InputVar("Grad");
-    // Actually, all tensors are LoDTensor except SelectedRows.
-    if (grad_var->IsType<framework::LoDTensor>()) {
-      param_out->mutable_data<T>(ctx.GetPlace());
-      auto* grad = ctx.Input<framework::Tensor>("Grad");
-
-      auto p = framework::EigenVector<T>::Flatten(*param);
-      auto g = framework::EigenVector<T>::Flatten(*grad);
-      auto o = framework::EigenVector<T>::Flatten(*param_out);
-      auto* lr = learning_rate->data<T>();
-
-      o = p - lr[0] * g;
-    } else if (grad_var->IsType<framework::SelectedRows>()) {
-      // TODO(qijun): In Sparse SGD operator, in-place update is enforced.
-      // This manual optimization brings difficulty to track data dependency.
-      // It's better to find a more elegant solution.
-      PADDLE_ENFORCE_EQ(param, param_out);
-      auto* grad = ctx.Input<framework::SelectedRows>("Grad");
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    const auto *learning_rate = ctx.Input<framework::Tensor>("LearningRate");
+
+    const auto *param_var = ctx.InputVar("Param");
+    const auto *grad_var = ctx.InputVar("Grad");
+
+    if (param_var->IsType<framework::LoDTensor>()) {
+      const auto *param = ctx.Input<framework::Tensor>("Param");
+      auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
+
+      // Actually, all tensors are LoDTensor except SelectedRows.
+      if (grad_var->IsType<framework::LoDTensor>()) {
+        param_out->mutable_data<T>(ctx.GetPlace());
+        const auto *grad = ctx.Input<framework::Tensor>("Grad");
+
+        auto p = framework::EigenVector<T>::Flatten(*param);
+        auto g = framework::EigenVector<T>::Flatten(*grad);
+        auto o = framework::EigenVector<T>::Flatten(*param_out);
+        auto *lr = learning_rate->data<T>();
+
+        o = p - lr[0] * g;
+      } else if (grad_var->IsType<framework::SelectedRows>()) {
+        // TODO(qijun): In Sparse SGD operator, in-place update is enforced.
+        // This manual optimization brings difficulty to track data dependency.
+        // It's better to find a more elegant solution.
+        PADDLE_ENFORCE_EQ(param, param_out);
+        const auto *grad = ctx.Input<framework::SelectedRows>("Grad");
+
+        // for distributed training, a sparse var may be empty,
+        // just skip updating.
+        if (grad->rows().size() == 0) {
+          return;
+        }
+
+        auto grad_height = grad->height();
+        auto out_dims = param_out->dims();
+        PADDLE_ENFORCE_EQ(grad_height, out_dims[0]);
+
+        auto &grad_value = grad->value();
+        auto &grad_rows = grad->rows();
+
+        size_t grad_row_numel = grad_value.numel() / grad_rows.size();
+        PADDLE_ENFORCE_EQ(grad_row_numel, param_out->numel() / grad_height);
+
+        auto *grad_data = grad_value.data<T>();
+        auto *out_data = param_out->data<T>();
+        auto *lr = learning_rate->data<T>();
+        for (size_t i = 0; i < grad_rows.size(); i++) {
+          PADDLE_ENFORCE(grad_rows[i] < grad_height,
+                         "Input rows index should less than height");
+          for (int64_t j = 0; j < grad_row_numel; j++) {
+            out_data[grad_rows[i] * grad_row_numel + j] -=
+                lr[0] * grad_data[i * grad_row_numel + j];
+          }
+        }
+      } else {
+        PADDLE_THROW("Unsupported Variable Type of Grad");
+      }
+    } else if (param_var->IsType<framework::SelectedRows>()) {
+      PADDLE_ENFORCE(grad_var->IsType<framework::SelectedRows>(),
+                     "when param "
+                     "is SelectedRows, gradient should also be SelectedRows");
+      const auto &param = param_var->Get<framework::SelectedRows>();
+      auto *param_out = ctx.Output<framework::SelectedRows>("ParamOut");
+      const auto &grad = grad_var->Get<framework::SelectedRows>();
 
       // for distributed training, a sparse var may be empty,
       // just skip updating.
-      if (grad->rows().size() == 0) {
+      if (grad.rows().size() == 0) {
         return;
       }
 
-      auto in_height = grad->height();
-      auto out_dims = param_out->dims();
-      PADDLE_ENFORCE_EQ(in_height, out_dims[0]);
-
-      auto& in_value = grad->value();
-      auto& in_rows = grad->rows();
+      size_t param_row_width = param.value().numel() / param.rows().size();
+      size_t grad_row_width = grad.value().numel() / grad.rows().size();
+      PADDLE_ENFORCE_EQ(param_row_width, grad_row_width,
+                        "param_row should have the same size with grad_row");
 
-      int64_t in_row_numel = in_value.numel() / in_rows.size();
-      PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height);
-
-      auto* in_data = in_value.data<T>();
-      auto* out_data = param_out->data<T>();
-      auto* lr = learning_rate->data<T>();
-      for (size_t i = 0; i < in_rows.size(); i++) {
-        PADDLE_ENFORCE(in_rows[i] < in_height,
+      const auto *lr = learning_rate->data<T>();
+      const auto *grad_data = grad.value().data<T>();
+      auto *out_data = param_out->mutable_value()->data<T>();
+      for (size_t i = 0; i < grad.rows().size(); i++) {
+        PADDLE_ENFORCE(grad.rows()[i] < grad.height(),
                        "Input rows index should less than height");
-        for (int64_t j = 0; j < in_row_numel; j++) {
-          out_data[in_rows[i] * in_row_numel + j] -=
-              lr[0] * in_data[i * in_row_numel + j];
+        size_t id_index = framework::GetIndex(param.rows(), grad.rows()[i]);
+        for (int64_t j = 0; j < grad_row_width; j++) {
+          out_data[id_index * grad_row_width + j] -=
+              lr[0] * grad_data[i * grad_row_width + j];
         }
       }
-    } else {
-      PADDLE_THROW("Unsupported Variable Type of Grad");
+    } else {
+      PADDLE_THROW("Unsupported Variable Type of Parameter");
     }
   }
 };
-- 
GitLab
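
Editor's note: the following standalone sketch is not part of the patch. It mirrors the new SelectedRows-param / SelectedRows-grad branch of SGDOpKernel::Compute together with the GetIndex helper, using plain std::vector buffers instead of Paddle tensors so the row-addressing logic can be tried in isolation. The row ids, row width, and values are invented for illustration.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors framework::GetIndex added by this patch: the position of
// `value` inside `rows`; the id must be present.
size_t GetIndex(const std::vector<int64_t>& rows, int64_t value) {
  auto it = std::find(rows.begin(), rows.end(), value);
  assert(it != rows.end() && "id should be in rows");
  return static_cast<size_t>(std::distance(rows.begin(), it));
}

int main() {
  const int64_t row_width = 2;  // elements per parameter/gradient row
  const float lr = 0.1f;        // learning rate

  // Sparse parameter: only rows {3, 7, 9} of a tall table are materialized.
  std::vector<int64_t> param_rows = {3, 7, 9};
  std::vector<float> param_value = {1.f, 1.f, 2.f, 2.f, 3.f, 3.f};

  // Sparse gradient touching a subset of those rows, in its own order.
  std::vector<int64_t> grad_rows = {9, 3};
  std::vector<float> grad_value = {10.f, 10.f, 20.f, 20.f};

  // The update loop from sgd_op.h: locate each gradient row inside the
  // parameter's row list, then update that parameter row in place.
  for (size_t i = 0; i < grad_rows.size(); ++i) {
    size_t id_index = GetIndex(param_rows, grad_rows[i]);
    for (int64_t j = 0; j < row_width; ++j) {
      param_value[id_index * row_width + j] -=
          lr * grad_value[i * row_width + j];
    }
  }

  for (size_t i = 0; i < param_rows.size(); ++i) {
    std::printf("row %lld: %.1f %.1f\n",
                static_cast<long long>(param_rows[i]),
                param_value[i * row_width],
                param_value[i * row_width + 1]);
  }
  return 0;
}

Compiled with any C++11 compiler, this prints "row 3: -1.0 -1.0", "row 7: 2.0 2.0" (untouched, since no gradient row maps to it), and "row 9: 2.0 2.0" -- the same in-place, row-addressed update the kernel performs on the parameter's value tensor.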