diff --git a/paddle/fluid/framework/inplace_op_inference.h b/paddle/fluid/framework/inplace_op_inference.h
index 40026eaca9a92e6acdb60e03578ad41f137e8502..225c8e26b24e5f083d8a2737b3bffcb3de048ead 100644
--- a/paddle/fluid/framework/inplace_op_inference.h
+++ b/paddle/fluid/framework/inplace_op_inference.h
@@ -43,13 +43,11 @@ class SingleOpInplaceInToOut : public InplaceOpInference {
  public:
   std::unordered_map<std::string, std::string> operator()(
       const OpDesc& op_desc, bool use_cuda) const override {
-    PADDLE_ENFORCE_EQ(op_desc.InputNames().size(), 1,
-                      "Op inputs must be unique");
-    PADDLE_ENFORCE_EQ(op_desc.OutputNames().size(), 1,
-                      "Op outputs must be unique");
-    auto x_name = op_desc.InputNames().at(0);
-    auto out_name = op_desc.OutputNames().at(0);
-    return std::unordered_map<std::string, std::string>{{x_name, out_name}};
+    auto inputs = op_desc.InputNames();
+    auto outputs = op_desc.OutputNames();
+    PADDLE_ENFORCE_EQ(inputs.size(), 1, "Op inputs must be unique");
+    PADDLE_ENFORCE_EQ(outputs.size(), 1, "Op outputs must be unique");
+    return {{inputs[0], outputs[0]}};
   }
 };
 
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index ea19dcd3ab08d64e5ee6806df4d70f8be64a9905..8c70c9abccd43dcbf23a73de823144f700388de7 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -210,14 +210,6 @@ struct BaseActivationFunctor {
   using AttrPair = std::vector<std::pair<const char*, float*>>;
 
   AttrPair GetAttrs() { return AttrPair(); }
-
-  /* NOTE(*): Output reuse X memory if X is not dependented by its Gradient.
-     For example, sigmoid op's gradient didn't involve x, so its output can
-     reuse
-     input memory. But abs op's gradient use x, it can not be inplaced.
-     gradient did use x.
-   */
-  bool Inplace() const { return false; }
 };
 
 // sigmoid(x) = 1 / (1 + exp(-x))