diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index f6a43804ef2fd73c4a2c2c3b3dfbb90bff1c451b..a3b4a8c0829ae3324e933309b2eaea35fe571997 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -35,6 +35,17 @@ std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
     std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
 };
 
+proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
+  if (var->IsType<framework::LoDTensor>()) {
+    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
+  } else if (var->IsType<framework::SelectedRows>()) {
+    return framework::ToDataType(
+        var->Get<framework::SelectedRows>().value().type());
+  } else {
+    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
+  }
+}
+
 static DDim GetDims(const Scope& scope, const std::string& name) {
   Variable* var = scope.FindVar(name);
   if (var == nullptr) {
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 41214b41cb68cbd7049552f39195ae5257e0d06f..b7a7c69b4c8493f945926c75797c49d327a3197e 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -61,6 +61,8 @@ inline std::string GradVarName(const std::string& var_name) {
   return var_name + kGradVarSuffix;
 }
 
+proto::VarType::Type GetDataTypeOfVar(const Variable* var);
+
 class OperatorBase;
 class ExecutionContext;
 
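Note: the new GetDataTypeOfVar helper centralizes a dispatch that several operators had been duplicating: an input variable may hold either a LoDTensor or a SelectedRows, and kernel selection needs the element type in both cases. The call-site pattern, as adopted by the lookup_table and sgd hunks below:

    framework::OpKernelType GetExpectedKernelType(
        const framework::ExecutionContext& ctx) const override {
      auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W"));
      return framework::OpKernelType(data_type, ctx.device_context());
    }
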
diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc
index 504344e937dfdc362cdc22298a5f963d87011e9d..d9d6b7dd67f1c6e4bbd6a4e1a8f0843d4cb93c05 100644
--- a/paddle/fluid/framework/selected_rows.cc
+++ b/paddle/fluid/framework/selected_rows.cc
@@ -1,8 +1,11 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
+
 http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -13,6 +16,7 @@ limitations under the License. */
 
 namespace paddle {
 namespace framework {
+
 void SerializeToStream(std::ostream& os, const SelectedRows& selected_rows,
                        const platform::DeviceContext& dev_ctx) {
   {  // the 1st field, uint32_t version
diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h
index 9458d56a01df432aea573d796456b9be31350038..8e2d9470d3954e0f66c74828a8d8292c2875a8f4 100644
--- a/paddle/fluid/framework/selected_rows.h
+++ b/paddle/fluid/framework/selected_rows.h
@@ -1,8 +1,11 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
+
 http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -47,6 +50,15 @@ class SelectedRows {
 
   void set_rows(const Vector<int64_t>& rows) { rows_ = rows; }
 
+  /**
+   * get the index of id in rows
+   */
+  int64_t index(int64_t id) const {
+    auto it = std::find(rows_.begin(), rows_.end(), id);
+    PADDLE_ENFORCE(it != rows_.end(), "id should be in rows");
+    return static_cast<int64_t>(std::distance(rows_.begin(), it));
+  }
+
   DDim GetCompleteDims() const {
     std::vector<int64_t> dims = vectorize(value_->dims());
     dims[0] = height_;
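Note: SelectedRows::index() replaces the free function getIndex() that lookup_table_op.h used to carry (removed below) and also serves the new sparse-parameter SGD path. It is a linear scan over the row-id list, so each call costs O(rows_.size()), and it aborts via PADDLE_ENFORCE when the id is absent. A standalone sketch of the semantics, with std::vector standing in for Paddle's Vector<int64_t> (IndexOf is an illustrative name, not Paddle API):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Same lookup SelectedRows::index() performs: the position of `id`
    // inside the row-id list, found by linear search.
    int64_t IndexOf(const std::vector<int64_t>& rows, int64_t id) {
      auto it = std::find(rows.begin(), rows.end(), id);
      assert(it != rows.end() && "id should be in rows");
      return static_cast<int64_t>(std::distance(rows.begin(), it));
    }

    int main() {
      const std::vector<int64_t> rows = {0, 4, 7};  // sparse row ids
      // id 7 is stored in slot 2 of the value tensor.
      return IndexOf(rows, 7) == 2 ? 0 : 1;
    }
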
diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc
index deabcdc99f819851b2df9bb0c7b05a5b339568f3..bf33be310686640fa187a07cf46a157b7f433340 100644
--- a/paddle/fluid/operators/lookup_table_op.cc
+++ b/paddle/fluid/operators/lookup_table_op.cc
@@ -18,22 +18,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-static inline framework::OpKernelType ExpectedKernelType(
-    const framework::ExecutionContext& ctx) {
-  auto* table_var = ctx.InputVar("W");
-  if (table_var->IsType<LoDTensor>()) {
-    return framework::OpKernelType(
-        framework::ToDataType(table_var->Get<LoDTensor>().type()),
-        ctx.device_context());
-  } else if (table_var->IsType<SelectedRows>()) {
-    return framework::OpKernelType(
-        framework::ToDataType(table_var->Get<SelectedRows>().value().type()),
-        ctx.device_context());
-  } else {
-    PADDLE_THROW("W should be LoDTensor or SelectedRows");
-  }
-}
-
 class LookupTableOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -67,7 +51,8 @@ class LookupTableOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return ExpectedKernelType(ctx);
+    auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W"));
+    return framework::OpKernelType(data_type, ctx.device_context());
   }
 };
 
@@ -138,7 +123,8 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return ExpectedKernelType(ctx);
+    auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W"));
+    return framework::OpKernelType(data_type, ctx.device_context());
   }
 };
 
diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h
index fff5edda62d4b115605a4cab35ed5457b4db5f21..cb088c267bcc028ff11583cd73de5ca1722a9b69 100644
--- a/paddle/fluid/operators/lookup_table_op.h
+++ b/paddle/fluid/operators/lookup_table_op.h
@@ -30,13 +30,7 @@ using LoDTensor = framework::LoDTensor;
 using SelectedRows = framework::SelectedRows;
 using DDim = framework::DDim;
 
-static constexpr int64_t kNoPadding = -1;
-
-inline size_t getIndex(const std::vector<int64_t> &rows, int64_t value) {
-  auto it = std::find(rows.begin(), rows.end(), value);
-  PADDLE_ENFORCE(it != rows.end(), "id should be in rows");
-  return static_cast<size_t>(std::distance(rows.begin(), it));
-}
+constexpr int64_t kNoPadding = -1;
 
 template <typename T>
 class LookupTableKernel : public framework::OpKernel<T> {
@@ -55,7 +49,9 @@ class LookupTableKernel : public framework::OpKernel<T> {
       auto *table_t = context.Input<SelectedRows>("W");
       table_dim = table_t->value().dims();
     } else {
-      PADDLE_THROW("table only support LoDTensor and SelectedRows");
+      PADDLE_THROW(
+          "The parameter W of a LookupTable "
+          "must be either LoDTensor or SelectedRows");
     }
 
     int64_t *ids;
@@ -107,7 +103,7 @@ class LookupTableKernel : public framework::OpKernel<T> {
           memset(output + i * row_width, 0, row_width * sizeof(T));
         } else {
           PADDLE_ENFORCE_GE(ids[i], 0);
-          auto id_index = getIndex(table_t.rows(), ids[i]);
+          auto id_index = table_t.index(ids[i]);
           memcpy(output + i * row_width, table + id_index * row_width,
                  row_width * sizeof(T));
         }
@@ -128,7 +124,9 @@ class LookupTableGradKernel : public framework::OpKernel<T> {
       auto *table_t = context.Input<SelectedRows>("W");
       table_dim = table_t->value().dims();
     } else {
-      PADDLE_THROW("table only support LoDTensor and SelectedRows");
+      PADDLE_THROW(
+          "The parameter W of a LookupTable "
+          "must be either LoDTensor or SelectedRows");
     }
 
     bool is_sparse = context.Attr<bool>("is_sparse");
diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc
index d0aa2f9cbadaadf4e7e625628d9db5677d50d277..074fa9e00f2ec531f324ff10113d95144687d500 100644
--- a/paddle/fluid/operators/sgd_op.cc
+++ b/paddle/fluid/operators/sgd_op.cc
@@ -43,9 +43,8 @@ class SGDOp : public framework::OperatorWithKernel {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<framework::Tensor>("Param")->type()),
-        ctx.GetPlace());
+    auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("Param"));
+    return framework::OpKernelType(data_type, ctx.device_context());
   }
 };
 
@@ -53,10 +52,12 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Param", "(Tensor) Input parameter");
+    AddInput("Param", "(Tensor or SelectedRows) Input parameter");
     AddInput("LearningRate", "(Tensor) Learning rate of SGD");
-    AddInput("Grad", "(Tensor) Input gradient");
-    AddOutput("ParamOut", "(Tensor) Output parameter");
+    AddInput("Grad", "(Tensor or SelectedRows) Input gradient");
+    AddOutput("ParamOut",
+              "(Tensor or SelectedRows, same with Param) "
+              "Output parameter, should share the same memory with Param");
     AddComment(R"DOC(
 
 SGD operator
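Note: two details in the sgd_op.cc hunk. First, the kernel key is now derived through GetDataTypeOfVar, so a SelectedRows Param no longer goes through ctx.Input<framework::Tensor>("Param"), which assumes a dense tensor. Second, the ParamOut doc string makes the in-place contract explicit; the new unit test below follows it by binding ParamOut to the same variable as Param.
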
diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h
index 0ad801079400f1830d85a945e57a434a86adeb00..8d2bdf75903b4958e14605781f65c5a214cb5300 100644
--- a/paddle/fluid/operators/sgd_op.h
+++ b/paddle/fluid/operators/sgd_op.h
@@ -23,60 +23,97 @@ namespace operators {
 
 template <typename T>
 class SGDOpKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* param = ctx.Input<framework::Tensor>("Param");
-    auto* param_out = ctx.Output<framework::Tensor>("ParamOut");
-    auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate");
-
-    auto* grad_var = ctx.InputVar("Grad");
-    // Actually, all tensors are LoDTensor except SelectedRows.
-    if (grad_var->IsType<framework::LoDTensor>()) {
-      param_out->mutable_data<T>(ctx.GetPlace());
-      auto* grad = ctx.Input<framework::Tensor>("Grad");
-
-      auto p = framework::EigenVector<T>::Flatten(*param);
-      auto g = framework::EigenVector<T>::Flatten(*grad);
-      auto o = framework::EigenVector<T>::Flatten(*param_out);
-      auto* lr = learning_rate->data<T>();
-
-      o = p - lr[0] * g;
-    } else if (grad_var->IsType<framework::SelectedRows>()) {
-      // TODO(qijun): In Sparse SGD operator, in-place update is enforced.
-      // This manual optimization brings difficulty to track data dependency.
-      // It's better to find a more elegant solution.
-      PADDLE_ENFORCE_EQ(param, param_out);
-      auto* grad = ctx.Input<framework::SelectedRows>("Grad");
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    const auto *learning_rate = ctx.Input<framework::Tensor>("LearningRate");
+
+    const auto *param_var = ctx.InputVar("Param");
+    const auto *grad_var = ctx.InputVar("Grad");
+
+    if (param_var->IsType<framework::LoDTensor>()) {
+      const auto *param = ctx.Input<framework::Tensor>("Param");
+      auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
+
+      // Actually, all tensors are LoDTensor except SelectedRows.
+      if (grad_var->IsType<framework::LoDTensor>()) {
+        param_out->mutable_data<T>(ctx.GetPlace());
+        const auto *grad = ctx.Input<framework::Tensor>("Grad");
+
+        auto p = framework::EigenVector<T>::Flatten(*param);
+        auto g = framework::EigenVector<T>::Flatten(*grad);
+        auto o = framework::EigenVector<T>::Flatten(*param_out);
+        auto *lr = learning_rate->data<T>();
+
+        o = p - lr[0] * g;
+      } else if (grad_var->IsType<framework::SelectedRows>()) {
+        // TODO(qijun): In Sparse SGD operator, in-place update is enforced.
+        // This manual optimization brings difficulty to track data dependency.
+        // It's better to find a more elegant solution.
+        PADDLE_ENFORCE_EQ(param, param_out);
+        const auto *grad = ctx.Input<framework::SelectedRows>("Grad");
+
+        // for distributed training, a sparse var may be empty,
+        // just skip updating.
+        if (grad->rows().size() == 0) {
+          return;
+        }
+
+        auto grad_height = grad->height();
+        auto out_dims = param_out->dims();
+        PADDLE_ENFORCE_EQ(grad_height, out_dims[0]);
+
+        auto &grad_value = grad->value();
+        auto &grad_rows = grad->rows();
+
+        size_t grad_row_numel = grad_value.numel() / grad_rows.size();
+        PADDLE_ENFORCE_EQ(grad_row_numel, param_out->numel() / grad_height);
+
+        auto *grad_data = grad_value.data<T>();
+        auto *out_data = param_out->data<T>();
+        auto *lr = learning_rate->data<T>();
+        for (size_t i = 0; i < grad_rows.size(); i++) {
+          PADDLE_ENFORCE(grad_rows[i] < grad_height,
+                         "Input rows index should less than height");
+          for (int64_t j = 0; j < grad_row_numel; j++) {
+            out_data[grad_rows[i] * grad_row_numel + j] -=
+                lr[0] * grad_data[i * grad_row_numel + j];
+          }
+        }
+      } else {
+        PADDLE_THROW("Unsupported Variable Type of Grad");
+      }
+    } else if (param_var->IsType<framework::SelectedRows>()) {
+      PADDLE_ENFORCE(grad_var->IsType<framework::SelectedRows>(),
+                     "when param "
+                     "is SelectedRows, gradient should also be SelectedRows");
+      const auto &param = param_var->Get<framework::SelectedRows>();
+      auto *param_out = ctx.Output<framework::SelectedRows>("ParamOut");
+      const auto &grad = grad_var->Get<framework::SelectedRows>();
 
       // for distributed training, a sparse var may be empty,
       // just skip updating.
-      if (grad->rows().size() == 0) {
+      if (grad.rows().size() == 0) {
         return;
       }
 
-      auto in_height = grad->height();
-      auto out_dims = param_out->dims();
-      PADDLE_ENFORCE_EQ(in_height, out_dims[0]);
-
-      auto& in_value = grad->value();
-      auto& in_rows = grad->rows();
+      size_t param_row_width = param.value().numel() / param.rows().size();
+      size_t grad_row_width = grad.value().numel() / grad.rows().size();
+      PADDLE_ENFORCE_EQ(param_row_width, grad_row_width,
+                        "param_row should have the same size with grad_row");
 
-      int64_t in_row_numel = in_value.numel() / in_rows.size();
-      PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height);
-
-      auto* in_data = in_value.data<T>();
-      auto* out_data = param_out->data<T>();
-      auto* lr = learning_rate->data<T>();
-      for (size_t i = 0; i < in_rows.size(); i++) {
-        PADDLE_ENFORCE(in_rows[i] < in_height,
+      const auto *lr = learning_rate->data<T>();
+      const auto *grad_data = grad.value().data<T>();
+      auto *out_data = param_out->mutable_value()->data<T>();
+      for (size_t i = 0; i < grad.rows().size(); i++) {
+        PADDLE_ENFORCE(grad.rows()[i] < grad.height(),
                        "Input rows index should less than height");
-        for (int64_t j = 0; j < in_row_numel; j++) {
-          out_data[in_rows[i] * in_row_numel + j] -=
-              lr[0] * in_data[i * in_row_numel + j];
+        int64_t id_index = param.index(grad.rows()[i]);
+        for (int64_t j = 0; j < grad_row_width; j++) {
+          out_data[id_index * grad_row_width + j] -=
+              lr[0] * grad_data[i * grad_row_width + j];
         }
       }
-    } else {
-      PADDLE_THROW("Unsupported Variable Type of Grad");
+    } else {
+      PADDLE_THROW("Unsupported Variable Type of Parameter");
     }
   }
 };
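Note: the heart of the change is the second top-level branch: when Param itself is a SelectedRows, each sparse gradient row is mapped through param.index() to the slot that stores that row id, and only those rows are touched. A standalone sketch of the update arithmetic, with plain vectors standing in for Paddle's tensors (SparseSgdUpdate is an illustrative name, not Paddle API):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // param_value/grad_value are row-major with row_width entries per row;
    // param_rows/grad_rows list which logical row each physical row stores.
    void SparseSgdUpdate(const std::vector<int64_t>& param_rows,
                         std::vector<float>* param_value,
                         const std::vector<int64_t>& grad_rows,
                         const std::vector<float>& grad_value,
                         int64_t row_width, float lr) {
      for (size_t i = 0; i < grad_rows.size(); ++i) {
        // What param.index(grad.rows()[i]) does in the kernel:
        // find the parameter slot that stores this row id.
        auto it =
            std::find(param_rows.begin(), param_rows.end(), grad_rows[i]);
        assert(it != param_rows.end() && "id should be in rows");
        const int64_t id_index = std::distance(param_rows.begin(), it);
        for (int64_t j = 0; j < row_width; ++j) {
          // In-place SGD step on just this row: param -= lr * grad.
          (*param_value)[id_index * row_width + j] -=
              lr * grad_value[i * row_width + j];
        }
      }
    }

Unlike the dense-parameter branch above it, nothing outside the gradient's row set is read or written, which is the point of keeping the parameter as SelectedRows.
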
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index 3f739afd2516fdc2bdf3711d4780a1196c6f3f13..f8d5785fbfe64843f4aa3b96b24809df60980c74 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -115,18 +115,18 @@ class TestLookupTableWIsSelectedRows(OpTest):
         w_array = np.ones((len(rows), row_numel)).astype("float32")
         for i in range(len(rows)):
             w_array[i] *= i
-        ids_tensor = w_selected_rows.get_tensor()
-        ids_tensor.set(w_array, place)
+        w_tensor = w_selected_rows.get_tensor()
+        w_tensor.set(w_array, place)
 
         # create Out Variable
-        Out_tensor = scope.var('Out').get_tensor()
+        out_tensor = scope.var('Out').get_tensor()
 
         # create and run lookup_table operator
         lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out')
         lookup_table.run(scope, place)
 
         # get result from Out
-        result_array = np.array(Out_tensor)
+        result_array = np.array(out_tensor)
         # all(): return True if all elements of the iterable are true (or if the iterable is empty)
         for idx, row in enumerate(ids_array):
             assert (row[0] == result_array[idx]).all()
diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py
index c498b23db12cd83304f4c3a3d1f15bd68ad4f0b6..3126293f9d8e52daa866be5fc1533648a33f3363 100644
--- a/python/paddle/fluid/tests/unittests/test_sgd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py
@@ -97,5 +97,71 @@ class TestSparseSGDOp(unittest.TestCase):
         self.check_with_place(place)
 
 
+class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
+    def check_with_place(self, place):
+        scope = core.Scope()
+
+        row_width = 12
+        # create and initialize Grad Variable
+        grad_height = 10
+        grad_rows = [0, 4, 7]
+
+        grad_selected_rows = scope.var('Grad').get_selected_rows()
+        grad_selected_rows.set_height(grad_height)
+        grad_selected_rows.set_rows(grad_rows)
+        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
+        grad_array[0, 0] = 2.0
+        grad_array[2, 8] = 4.0
+
+        grad_tensor = grad_selected_rows.get_tensor()
+        grad_tensor.set(grad_array, place)
+
+        # create and initialize Param Variable
+        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]
+
+        # init Param
+        w_selected_rows = scope.var('Param').get_selected_rows()
+        w_selected_rows.set_height(len(param_rows))
+        w_selected_rows.set_rows(param_rows)
+        w_array = np.ones((len(param_rows), row_width)).astype("float32")
+        for i in range(len(param_rows)):
+            w_array[i] *= i
+        w_tensor = w_selected_rows.get_tensor()
+        w_tensor.set(w_array, place)
+
+        w_before_optimize = np.array(w_tensor)
+
+        # create and initialize LearningRate Variable
+        lr_value = 0.1
+        lr = scope.var('LearningRate').get_tensor()
+        lr_array = np.full((1), lr_value).astype("float32")
+        lr.set(lr_array, place)
+
+        # optimize with Python
+        w_after_optimize = np.copy(w_before_optimize)
+        for index, id in enumerate(grad_rows):
+            w_after_optimize[id] = w_before_optimize[
+                id] - lr_value * grad_array[index]
+
+        # create and run sgd operator
+        sgd_op = Operator(
+            "sgd",
+            Param='Param',
+            Grad='Grad',
+            ParamOut='Param',
+            LearningRate='LearningRate')
+        sgd_op.run(scope, place)
+
+        # get and compare result
+        result_array = np.array(w_tensor)
+        assert (result_array == w_after_optimize).all()
+
+    def test_sparse_parameter_sgd(self):
+        places = [core.CPUPlace()]
+        # do not support GPU kernel currently
+        for place in places:
+            self.check_with_place(place)
+
+
 if __name__ == "__main__":
     unittest.main()
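Note: a quick check of the expected values in TestSGDOpOptimizeSelectedRows: Param row 4 is initialized to all 4.0 (w_array[i] *= i) and corresponds to grad_rows index 1, whose gradient row is all ones, so with lr_value = 0.1 every entry of that row becomes 4.0 - 0.1 * 1.0 = 3.9. The numpy loop computes the same values, and the operator output is compared against them elementwise; ParamOut='Param' exercises the in-place contract stated in the SGDOpMaker doc string.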