Commit 42e61af8 authored by: Xin Pan

polish

test=develop
Parent 4d9feb35
@@ -211,8 +211,8 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   std::vector<framework::VariableValueMap> grad_outputs;
   if (backward_id_ > 0) {
-    grad_outputs.resize(1);
     VLOG(3) << "py_layer_grad";
+    grad_outputs.resize(1);
     grad_outputs[0][framework::GradVarName(PyLayer::kFwdOut)] =
         PyLayer::ApplyGrad(
             backward_id_,
......
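The branch above is the PyLayer path: when `backward_id_ > 0`, the op's backward is a Python callable identified by that id, rather than a set of C++ grad op descs, and its result is stored under the single `GradVarName(PyLayer::kFwdOut)` key. As a rough illustration only (plain Python, hypothetical names, not Paddle's actual registry), the dispatch-by-id idea looks like this:

```python
# Hypothetical sketch of dispatch-by-id for Python-defined backwards.
# Not Paddle code; the names below are made up for illustration.
_py_backward_registry = {}

def register_py_backward(backward_id, fn):
    _py_backward_registry[backward_id] = fn

def apply_py_layer_grad(backward_id, grad_inputs):
    # Mirrors the idea of PyLayer::ApplyGrad above: look up the Python
    # callable registered under backward_id and run it on the grad inputs.
    return _py_backward_registry[backward_id](grad_inputs)
```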
@@ -199,8 +199,10 @@ class OpBase {
  // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_.
  framework::OpDesc* op_desc_;
  int forward_id_;
  // When a backward pass exists, one of `grad_op_descs_` or `backward_id_`
  // is set, not both.
  // Note: each fwd op corresponds to a vector of bwd ops.
  std::vector<framework::OpDesc*> grad_op_descs_;
  int backward_id_;
@@ -211,8 +213,11 @@ class OpBase {
  OpBasePtrMap pre_ops_;
  std::map<std::string, std::vector<int>> pre_ops_out_idx_;
  // Inputs to a vector of bwd ops.
  std::vector<framework::VariableValueMap> grad_input_vars_;
  // Outputs to a vector of bwd ops.
  std::vector<framework::VariableValueMap> grad_output_vars_;
  framework::BlockDesc* block_;
};
......
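To make the relationships described by these header comments concrete, here is a minimal Python mock (names and structure are illustrative only, not Paddle's implementation): an op keeps either `grad_op_descs_` or `backward_id_`, never both, and holds one input map and one output map per generated bwd op.

```python
# Minimal mock (not Paddle code) of the bookkeeping described above.
class MockOpBase(object):
    def __init__(self, grad_op_descs=None, backward_id=-1):
        grad_op_descs = grad_op_descs or []
        # grad_op_descs_ and backward_id_ are mutually exclusive.
        assert not (grad_op_descs and backward_id > 0)
        self.grad_op_descs = grad_op_descs
        self.backward_id = backward_id
        # One input map and one output map per bwd op, mirroring
        # grad_input_vars_ / grad_output_vars_.
        num_bwd_ops = 1 if backward_id > 0 else len(grad_op_descs)
        self.grad_input_vars = [dict() for _ in range(num_bwd_ops)]
        self.grad_output_vars = [dict() for _ in range(num_bwd_ops)]

# e.g. a fwd op that expands into two bwd ops:
op = MockOpBase(grad_op_descs=["x_grad_op", "y_grad_op"])
assert len(op.grad_input_vars) == 2 and len(op.grad_output_vars) == 2
```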
@@ -68,19 +68,16 @@ class MLP(fluid.imperative.Layer):
 class TestImperative(unittest.TestCase):
     def test_sum_op(self):
+        x = np.ones([2, 2], np.float32)
         with fluid.imperative.guard():
             inputs = []
             for _ in range(10):
-                inputs.append(
-                    fluid.imperative.base.to_variable(
-                        np.ones([2, 2], np.float32)))
-            sys.stderr.write('%s\n' % inputs[0].dtype)
+                inputs.append(fluid.imperative.base.to_variable(x))
             ret = fluid.layers.sums(inputs)
-            sys.stderr.write('%s\n' % ret.dtype)
             loss = fluid.layers.reduce_sum(ret)
-            sys.stderr.write('%s\n' % loss.dtype)
             loss._backward()
-            sys.stderr.write('%s %s\n' % (ret._numpy(), inputs[0]._gradient()))
+            self.assertTrue(np.allclose(ret._numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0]._gradient(), x))

     def test_layer(self):
         with fluid.imperative.guard():
......
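For reference, the values asserted by the updated test follow directly from the math: `ret` is the elementwise sum of ten copies of `x`, and since `loss = reduce_sum(ret)`, the gradient of `loss` with respect to any single input is a tensor of ones, i.e. equal to `x`. A plain NumPy check of those numbers, independent of Paddle:

```python
import numpy as np

# Reference computation for the values asserted in test_sum_op above.
x = np.ones([2, 2], np.float32)
ret = sum(x for _ in range(10))   # elementwise sum of ten inputs -> all 10s
loss = ret.sum()                  # reduce_sum over all elements -> 40.0
# d(loss)/d(inputs[0]) is a tensor of ones, which equals x itself.
grad_input0 = np.ones_like(x)
assert np.allclose(ret, x * 10)
assert np.allclose(grad_input0, x)
```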