diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index 3bce95535cf10c0df95b503c6e362b3f0ba2e723..8f9eae4186ad848fcecd74b4ab22711f8bb99e2a 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -27,6 +27,8 @@ class SGDOp : public framework::OperatorWithKernel {
                    "Input(param) of SGDOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("grad"),
                    "Input(grad) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("learning_rate"),
+                   "Input(learning_rate) of SGDOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("param_out"),
                    "Output(param_out) of SGDOp should not be null.");
 
@@ -42,9 +44,9 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
   SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("param", "input parameter");
+    AddInput("learning_rate", "learning rate of sgd");
     AddInput("grad", "input gradient");
     AddOutput("param_out", "output parameter");
-    AddAttr<float>("learning_rate", "learning rate of sgd");
     AddComment(R"DOC(
 Simplest sgd algorithm.
 
diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index a3fe3308942f98e2c28376b589b6fc930e6878a1..977d201ced31c498c2ab41cf6d412756cabb3aee 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel {
     auto param = ctx.Input<Tensor>("param");
     auto grad = ctx.Input<Tensor>("grad");
     auto param_out = ctx.Output<Tensor>("param_out");
-    float lr = ctx.Attr<float>("learning_rate");
+    float lr = *ctx.Input<float>("learning_rate");
 
     param_out->mutable_data<T>(ctx.GetPlace());
 
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index f4121e9d71824296770f86c1e94c096f767dec0a..d480427f593ccba20ef6555809a524654d0e54b8 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -143,6 +143,13 @@ All parameter, weight, gradient are variables in Paddle.
       .def("set_int",
            [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
       .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
+      .def("is_float", [](const Variable &var) { return var.IsType<float>(); })
+      .def("set_float",
+           [](Variable &var, float val) -> void {
+             *var.GetMutable<float>() = val;
+           })
+      .def("get_float",
+           [](const Variable &var) -> float { return var.Get<float>(); })
       .def("get_tensor",
            [](Variable &self) -> LoDTensor * {
              return self.GetMutable<LoDTensor>();
diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py
index 75df2eeddfe67269d4709887c7cfdb8fab108bd8..81067f38bbf64ac1ab4ccf02aa43b0a38b7d48ad 100644
--- a/python/paddle/v2/framework/tests/op_test.py
+++ b/python/paddle/v2/framework/tests/op_test.py
@@ -46,12 +46,17 @@ def create_op(scope, op_type, inputs, outputs, attrs):
 
 def set_input(scope, op, inputs, place):
     def __set_input__(var_name, var):
-        tensor = scope.find_var(var_name).get_tensor()
-        if isinstance(var, tuple):
-            tensor.set_lod(var[1])
-            var = var[0]
-        tensor.set_dims(var.shape)
-        tensor.set(var, place)
+        if isinstance(var, tuple) or isinstance(var, np.ndarray):
+            tensor = scope.find_var(var_name).get_tensor()
+            if isinstance(var, tuple):
+                tensor.set_lod(var[1])
+                var = var[0]
+            tensor.set_dims(var.shape)
+            tensor.set(var, place)
+        elif isinstance(var, float):
+            scope.find_var(var_name).set_float(var)
+        elif isinstance(var, int):
+            scope.find_var(var_name).set_int(var)
 
     for in_name, in_dup in Operator.get_op_inputs(op.type()):
         if in_name in inputs:
diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py
index 64e54d1500c1bc134cc1efe33d41a16dbc08f2d4..f1125f4edb5248abb2a0128a7a8b8b3647ed3317 100644
--- a/python/paddle/v2/framework/tests/test_sgd_op.py
+++ b/python/paddle/v2/framework/tests/test_sgd_op.py
@@ -10,8 +10,7 @@ class TestSGDOp(OpTest):
         g = np.random.random((102, 105)).astype("float32")
         lr = 0.1
 
-        self.inputs = {'param': w, 'grad': g}
-        self.attrs = {'learning_rate': lr}
+        self.inputs = {'param': w, 'grad': g, 'learning_rate': lr}
        self.outputs = {'param_out': w - lr * g}
 
     def test_check_output(self):