Unverified · Commit f1c5815e · Authored by: pangyoki · Committed by: GitHub

Fix a bug in the inplace fill_ and zero_ APIs (#41229)

* fix inplace fill_ and zero_ API

* add eager unittest
Parent: 93cb2350
...@@ -1824,7 +1824,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents( ...@@ -1824,7 +1824,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
// Bump inplace version of inplace tensor. // Bump inplace version of inplace tensor.
auto inplace_input_name = inplace_map[output_name]; auto inplace_input_name = inplace_map[output_name];
const char* FWD_OUT_TENSOR_TEMPLATE = const char* FWD_OUT_TENSOR_TEMPLATE =
" egr::EagerUtils::ModifyInplaceInput(outs[\"%s\"][0], &%s);\n" " egr::EagerUtils::GetOutput(outs[\"%s\"][0], &%s);\n"
" %s.bump_inplace_version();\n" " %s.bump_inplace_version();\n"
" VLOG(3) << \"Tensor(\" << %s.name() << \") uses Inplace " " VLOG(3) << \"Tensor(\" << %s.name() << \") uses Inplace "
"Strategy.\";\n"; "Strategy.\";\n";
......
...@@ -271,27 +271,6 @@ void EagerUtils::HandleViewBetweenInputAndOutput( ...@@ -271,27 +271,6 @@ void EagerUtils::HandleViewBetweenInputAndOutput(
} }
} }
// Propagate the meta information of an executed inplace op's output variable
// back onto the user-visible inplace tensor.
//
// Only metadata (shape, dtype, layout, ...) is copied: EagerVariable cannot
// rewrite a Tensor's meta after an inplace op (such as ``reshape``) has run,
// so the sync has to happen here on the Tensor side.
//
// @param inplace_variable  Variable holding the op's output tensor base.
// @param inplace_tensor    The caller's tensor to update; must be non-null.
void EagerUtils::ModifyInplaceInput(
    const std::shared_ptr<EagerVariable>& inplace_variable,
    paddle::experimental::Tensor* inplace_tensor) {
  PADDLE_ENFORCE_NOT_NULL(inplace_tensor,
                          paddle::platform::errors::Fatal(
                              "Inplace Tensor is null and cannot be modified. "
                              "We are tring to Modify Inplace Input from its "
                              "shared_ptr, this error may indicate the inplace "
                              " input is nullptr"));
  auto* tensor_base = inplace_variable->GetTensorBase().get();
  // Only dense tensors carry meta we can copy; other kinds are left untouched.
  if (!phi::DenseTensor::classof(tensor_base)) {
    return;
  }
  auto* src_dense = static_cast<phi::DenseTensor*>(tensor_base);
  auto* dst_dense =
      static_cast<phi::DenseTensor*>(inplace_tensor->impl().get());
  dst_dense->set_meta(src_dense->meta());
}
std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs( std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
const std::vector<std::shared_ptr<EagerVariable>>& outs) { const std::vector<std::shared_ptr<EagerVariable>>& outs) {
std::vector<paddle::experimental::Tensor> res; std::vector<paddle::experimental::Tensor> res;
......
...@@ -203,9 +203,6 @@ class EagerUtils { ...@@ -203,9 +203,6 @@ class EagerUtils {
static std::vector<std::shared_ptr<EagerVariable>> CreateVars( static std::vector<std::shared_ptr<EagerVariable>> CreateVars(
const size_t num); const size_t num);
// Construct Tensor From var // Construct Tensor From var
static void ModifyInplaceInput(
const std::shared_ptr<EagerVariable>& inplace_variable,
paddle::experimental::Tensor* inplace_tensor);
static std::vector<paddle::experimental::Tensor> GetOutputs( static std::vector<paddle::experimental::Tensor> GetOutputs(
const std::vector<std::shared_ptr<EagerVariable>>& outs); const std::vector<std::shared_ptr<EagerVariable>>& outs);
static paddle::experimental::Tensor GetOutput( static paddle::experimental::Tensor GetOutput(
......
...@@ -171,6 +171,12 @@ def _test_eager_guard(place=None): ...@@ -171,6 +171,12 @@ def _test_eager_guard(place=None):
if not _already_patch_eager_tensor: if not _already_patch_eager_tensor:
monkey_patch_varbase() monkey_patch_varbase()
monkey_patch_math_varbase() monkey_patch_math_varbase()
# Ugly setting
from paddle.tensor.manipulation import fill_, zero_
setattr(core.eager.Tensor, 'fill_', fill_)
setattr(core.eager.Tensor, 'zero_', zero_)
_already_patch_eager_tensor = True _already_patch_eager_tensor = True
try: try:
yield yield
......
...@@ -17,13 +17,14 @@ import unittest ...@@ -17,13 +17,14 @@ import unittest
import numpy as np import numpy as np
import six import six
import paddle import paddle
from paddle.fluid.framework import _test_eager_guard
class TensorFill_Test(unittest.TestCase): class TensorFill_Test(unittest.TestCase):
def setUp(self): def setUp(self):
self.shape = [32, 32] self.shape = [32, 32]
def test_tensor_fill_true(self): def func_test_tensor_fill_true(self):
typelist = ['float32', 'float64', 'int32', 'int64', 'float16'] typelist = ['float32', 'float64', 'int32', 'int64', 'float16']
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
...@@ -46,7 +47,12 @@ class TensorFill_Test(unittest.TestCase): ...@@ -46,7 +47,12 @@ class TensorFill_Test(unittest.TestCase):
tensor.fill_(var) #var type is basic type in typelist tensor.fill_(var) #var type is basic type in typelist
self.assertEqual((tensor.numpy() == target).all(), True) self.assertEqual((tensor.numpy() == target).all(), True)
def test_tensor_fill_backward(self): def test_tensor_fill_true(self):
with _test_eager_guard():
self.func_test_tensor_fill_true()
self.func_test_tensor_fill_true()
def func_test_tensor_fill_backward(self):
typelist = ['float32'] typelist = ['float32']
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
...@@ -71,13 +77,23 @@ class TensorFill_Test(unittest.TestCase): ...@@ -71,13 +77,23 @@ class TensorFill_Test(unittest.TestCase):
self.assertEqual((y.grad.numpy() == 0).all().item(), True) self.assertEqual((y.grad.numpy() == 0).all().item(), True)
def test_errors(self): def test_tensor_fill_backward(self):
with _test_eager_guard():
self.func_test_tensor_fill_backward()
self.func_test_tensor_fill_backward()
def func_test_errors(self):
def test_list(): def test_list():
x = paddle.to_tensor([2, 3, 4]) x = paddle.to_tensor([2, 3, 4])
x.fill_([1]) x.fill_([1])
self.assertRaises(TypeError, test_list) self.assertRaises(TypeError, test_list)
def test_errors(self):
# Run the TypeError checks twice: once inside _test_eager_guard (eager
# mode) and once outside it (legacy dygraph), covering both code paths.
with _test_eager_guard():
self.func_test_errors()
self.func_test_errors()
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -17,13 +17,14 @@ import unittest ...@@ -17,13 +17,14 @@ import unittest
import numpy as np import numpy as np
import six import six
import paddle import paddle
from paddle.fluid.framework import _test_eager_guard
class TensorFill_Test(unittest.TestCase): class TensorFill_Test(unittest.TestCase):
def setUp(self): def setUp(self):
self.shape = [32, 32] self.shape = [32, 32]
def test_tensor_fill_true(self): def func_test_tensor_fill_true(self):
typelist = ['float32', 'float64', 'int32', 'int64', 'float16'] typelist = ['float32', 'float64', 'int32', 'int64', 'float16']
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda(): if fluid.core.is_compiled_with_cuda():
...@@ -41,6 +42,11 @@ class TensorFill_Test(unittest.TestCase): ...@@ -41,6 +42,11 @@ class TensorFill_Test(unittest.TestCase):
tensor.zero_() tensor.zero_()
self.assertEqual((tensor.numpy() == target).all().item(), True) self.assertEqual((tensor.numpy() == target).all().item(), True)
def test_tensor_fill_true(self):
# Exercise the fill test twice: once inside _test_eager_guard (eager
# mode) and once outside it (legacy dygraph), covering both code paths.
with _test_eager_guard():
self.func_test_tensor_fill_true()
self.func_test_tensor_fill_true()
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.