diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index bc64bfd7ec2ed27835e5a3f9135343aeb3d4a580..a37720e5093342f5e02bd9a15a3099de434d6396 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -51,3 +51,5 @@
 op_library(softmax_op SRCS softmax_op.cc softmax_op.cu)
 op_library(fc_op SRCS fc_op.cc DEPS mul_op rowwise_add_op sigmoid_op
         softmax_op net)
+
+op_library(sgd_op SRCS sgd_op.cc sgd_op.cu)
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..04df87a3add2af7daa127a072f7b690f6cf94327
--- /dev/null
+++ b/paddle/operators/sgd_op.cc
@@ -0,0 +1,61 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/sgd_op.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/framework/tensor.h"
+
+namespace paddle {
+namespace operators {
+
+class SGDOp : public framework::OperatorWithKernel {
+protected:
+  void InferShape(
+      const std::vector<const framework::Tensor *> &inputs,
+      const std::vector<framework::Tensor *> &outputs) const override {
+    PADDLE_ENFORCE(inputs.size() == 2, "Input size of SGDOp must be two");
+    PADDLE_ENFORCE(outputs.size() == 1, "Output size of SGDOp must be one");
+    PADDLE_ENFORCE(inputs[0] != nullptr, "inputs[0] must be set");
+    PADDLE_ENFORCE(inputs[1] != nullptr, "inputs[1] must be set");
+    PADDLE_ENFORCE(outputs[0] != nullptr, "outputs[0] must be set");
+    PADDLE_ENFORCE(inputs[0]->dims() == inputs[1]->dims(),
+                   "The two inputs of SGD Op must have the same dimension.");
+    outputs[0]->set_dims(inputs[0]->dims());
+  }
+};
+
+class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
+public:
+  SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("param", "input parameter");
+    AddInput("grad", "input gradient");
+    AddOutput("param_out", "output parameter");
+    AddAttr<float>("learning_rate", "learning rate of sgd");
+    AddComment(R"DOC(
+
+Simplest SGD algorithm:
+
+param_out = param - learning_rate * grad;
+
+)DOC");
+  }
+};
+} // namespace operators
+} // namespace paddle
+
+REGISTER_OP(sgd, paddle::operators::SGDOp, paddle::operators::SGDOpMaker);
+typedef paddle::operators::SGDOpKernel<::paddle::platform::CPUPlace, float>
+    SGDOpKernel_CPU_float;
+REGISTER_OP_CPU_KERNEL(sgd, SGDOpKernel_CPU_float);
diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..400425db10896e3970fc7468e34aba596a536184
--- /dev/null
+++ b/paddle/operators/sgd_op.cu
@@ -0,0 +1,5 @@
+#include "paddle/operators/sgd_op.h"
+#include "paddle/framework/op_registry.h"
+
+typedef paddle::operators::SGDOpKernel<::paddle::platform::GPUPlace, float> SGDOpKernel_GPU_float;
+REGISTER_OP_GPU_KERNEL(sgd, SGDOpKernel_GPU_float);
\ No newline at end of file
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ee21ef8f93ae68bfdc3e012f82ea806dc90b6dc
--- /dev/null
+++ b/paddle/operators/sgd_op.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "glog/logging.h"
+#include "paddle/framework/operator.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename Place, typename T>
+class SGDOpKernel : public framework::OpKernel {
+public:
+  void Compute(const framework::KernelContext& ctx) const override {
+    auto param = ctx.Input("param")->Get<framework::Tensor>();
+    auto grad = ctx.Input("grad")->Get<framework::Tensor>();
+    auto* param_out = ctx.Output(0)->GetMutable<framework::Tensor>();
+    float lr = ctx.op_.GetAttr<float>("learning_rate");
+
+    param_out->mutable_data<T>(ctx.GetPlace());
+
+    param_out->flat<T>().device(*(ctx.GetEigenDevice<Place>())) =
+        param.flat<T>() - lr * grad.flat<T>();
+  }
+};
+
+} // namespace operators
+} // namespace paddle
diff --git a/paddle/operators/sgd_op_test.cc b/paddle/operators/sgd_op_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..75137259f5e608b259b073101353e5818bb17c92
--- /dev/null
+++ b/paddle/operators/sgd_op_test.cc
@@ -0,0 +1,22 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+#include <paddle/framework/op_registry.h>
+USE_OP(sgd);
+TEST(SGDOp, GetOpProto) {
+  auto& protos = paddle::framework::OpRegistry::protos();
+  auto it = protos.find("sgd");
+  ASSERT_NE(it, protos.end());
+}
diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt
index 29fb29c7c14f699e6114cc25c265ea8d85bce4d7..6354dd211d5d036e1b5971babaf624e8f847a92b 100644
--- a/paddle/pybind/CMakeLists.txt
+++ b/paddle/pybind/CMakeLists.txt
@@ -1,2 +1,2 @@
 cc_library(paddle_pybind SHARED SRCS pybind.cc DEPS pybind python
-    add_op fc_op)
+    add_op fc_op sgd_op)
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 7e84550f770e8dba998ce7ff91b9d774acbffc3e..54707a2859693af4a80692bf5cebab59c43ffbc3 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -28,6 +28,7 @@ namespace pd = paddle::framework;
 
 USE_OP(add_two);
 USE_OP_WITHOUT_KERNEL(fc);
+USE_OP(sgd);
 
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of Paddle Paddle");
diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt
index f71009aa8569beae330b18171043d456b59bca8d..ec076e40c9312fee7f3ba030dc69208069fd45a8 100644
--- a/python/paddle/v2/framework/tests/CMakeLists.txt
+++ b/python/paddle/v2/framework/tests/CMakeLists.txt
@@ -1,3 +1,3 @@
 add_python_test(test_framework test_protobuf.py test_scope.py
     test_default_scope_funcs.py test_op_creation_methods.py
-    test_tensor.py test_fc_op.py test_add_two_op.py)
+    test_tensor.py test_fc_op.py test_add_two_op.py test_sgd_op.py)
diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..405d73b224fa153e50b4ec408a921f2bdaab46aa
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_sgd_op.py
@@ -0,0 +1,18 @@
+import unittest
+import numpy
+from op_test_util import OpTestMeta
+
+
+class TestSGD(unittest.TestCase):
+    __metaclass__ = OpTestMeta
+
+    def setUp(self):
+        self.type = "sgd"
+        self.param = numpy.random.random((342, 345)).astype("float32")
+        self.grad = numpy.random.random((342, 345)).astype("float32")
+        self.learning_rate = 0.1
+        self.param_out = self.param - self.learning_rate * self.grad
+
+
+if __name__ == "__main__":
+    unittest.main()
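Note: both SGDOpKernel::Compute and test_sgd_op.py encode the same element-wise update, param_out = param - learning_rate * grad. The following standalone NumPy sketch is not part of the patch; it only restates that reference computation, and the helper name sgd_reference is illustrative.

import numpy

def sgd_reference(param, grad, learning_rate):
    # Element-wise SGD step: every parameter moves against its gradient,
    # scaled by the learning rate.
    return param - learning_rate * grad

param = numpy.random.random((342, 345)).astype("float32")
grad = numpy.random.random((342, 345)).astype("float32")
param_out = sgd_reference(param, grad, 0.1)

# Shape is preserved and the result matches the update rule exactly.
assert param_out.shape == param.shape
assert numpy.allclose(param_out, param - 0.1 * grad)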