diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index da13b08b59f80aeb64002d0d2d38cda93e4c366c..3c322565884f22cec36410677f0d55b46171ea7c 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1707,10 +1707,31 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
       }
     }
   }
-  generated_function_body += "\n";
   VLOG(6) << "Generated Outs Map";
 
+  // [Generation] Apply View Strategy (Tensor)
+  if (inplace_map.empty() && view_op_map.count(op_type)) {
+    const char* HANDLE_VIEW_BETWEEN_INPUT_AND_OUTPUT =
+        "  if (ins.count(\"%s\") && outs.count(\"%s\")) {\n"
+        "    egr::EagerUtils::HandleViewBetweenInputAndOutput(ins[\"%s\"][0], "
+        "outs[\"%s\"][0]);\n"
+        "  };\n";
+
+    std::string view_strategy_str = "";
+    std::string view_input_name = view_op_map[op_type].first;
+    std::string view_output_name = view_op_map[op_type].second;
+    view_strategy_str += paddle::string::Sprintf(
+        HANDLE_VIEW_BETWEEN_INPUT_AND_OUTPUT, view_input_name, view_output_name,
+        view_input_name, view_output_name);
+
+    generated_function_body += view_strategy_str;
+    generated_function_body += "\n";
+
+    VLOG(6) << "Generated View Strategy";
+  }
+  generated_function_body += "\n";
+
   // [Generation] Get Attrs
   dygraph_function_args_str +=
       ", const paddle::framework::AttributeMap& attr_map";
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index 34dd9d8d34b8c90ce8a01770fad3cb7362552985..5328033fc749b4b18f94cb9076ff14efb41aa28a 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -244,6 +244,33 @@ std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::CreateVars(
   return res;
 }
 
+void EagerUtils::HandleViewBetweenInputAndOutput(
+    const std::shared_ptr<egr::EagerVariable>& input_var,
+    const std::shared_ptr<egr::EagerVariable>& view_output_var) {
+  PADDLE_ENFORCE_EQ(
+      input_var->Var().IsInitialized(), true,
+      paddle::platform::errors::InvalidArgument(
+          "Tensor %s has not been initialized!", input_var->name()));
+
+  if (phi::DenseTensor::classof(input_var->GetTensorBase().get())) {
+    auto input_dense_tensor =
+        std::dynamic_pointer_cast<phi::DenseTensor>(input_var->GetTensorBase());
+    PADDLE_ENFORCE_EQ(
+        input_dense_tensor->IsInitialized(), true,
+        paddle::platform::errors::InvalidArgument(
+            "DenseTensor %s has not been initialized!", input_var->name()));
+
+    auto* view_output_tensor =
+        view_output_var->MutableVar()->GetMutable<phi::DenseTensor>();
+    view_output_tensor->ShareBufferWith(*input_dense_tensor);
+    view_output_tensor->ShareInplaceVersionCounterWith(*input_dense_tensor);
+
+    VLOG(3) << "Perform View between Output Var(" << view_output_var->name()
+            << ") and Input Var(" << input_var->name()
+            << "), share allocation and inplace version.";
+  }
+}
+
 void EagerUtils::ModifyInplaceInput(
     const std::shared_ptr<egr::EagerVariable>& inplace_variable,
     paddle::experimental::Tensor* inplace_tensor) {
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index 616a99b9bcc888ee0b6326dbcc3c2798ad4b3077..4c3f5c88e4c9347e92ee0e3f189fe6b624a919ff 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -168,6 +168,11 @@ class EagerUtils {
     }
   }
 
+  // View Strategy
+  static void HandleViewBetweenInputAndOutput(
+      const std::shared_ptr<egr::EagerVariable>& input_var,
+      const std::shared_ptr<egr::EagerVariable>& view_output_var);
+
  // TensorWrapper Utils
  static paddle::experimental::Tensor RecoverTensorWrapper(
      TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node);
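
A minimal Python-level sketch of what the generated `HandleViewBetweenInputAndOutput` call provides: since the output of a view op shares the input's allocation (`ShareBufferWith`) and its inplace version counter (`ShareInplaceVersionCounterWith`), an inplace update through either tensor is visible in both and advances both `inplace_version` values. `squeeze` is used here as the view API to match the base test case below (the test file lists `squeeze`, `unsqueeze`, `reshape`, `flatten`, `detach` as view APIs); the assertions mirror `func_test_view_api` and `func_test_forward_version`.

```python
import numpy as np
import paddle

var = paddle.rand([2, 3, 1])
view_var = paddle.squeeze(var, -1)  # view op: output reuses var's allocation

var.exp_()  # inplace update on the input

# The view observes the new values and the shared inplace version.
assert np.array_equal(var.numpy().flatten(), view_var.numpy().flatten())
assert var.inplace_version == 1
assert view_var.inplace_version == 1
```

The test changes below drop the per-class eager-mode flag and run every case under `_test_eager_guard()` as well as in legacy dygraph, restoring `setitem` as the inplace modification:
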
diff --git a/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py b/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py
index 85f1999ec878da6de2ddcf42c7e8877a3dd8c10c..92078a69b53a5ab6e9a8ab42db5457715ceb7434 100644
--- a/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py
+++ b/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py
@@ -29,13 +29,8 @@ from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode
 # View APIs include: `squeeze`, `unsqueeze`, `reshape`, `flatten`, `detach`
 class TestDygraphViewReuseAllocation(unittest.TestCase):
     def setUp(self):
-        self.set_flag_to_test_eager_mode()
         self.init_shape()
 
-    # some op don't suport eager_final_state in temporary
-    def set_flag_to_test_eager_mode(self):
-        self.flag_test_eager_mode = False
-
     def init_shape(self):
         self.input_shape = [2, 3, 1]
         self.output_shape = [2, 3]
@@ -46,10 +41,7 @@ class TestDygraphViewReuseAllocation(unittest.TestCase):
     def func_test_view_api(self):
         var = paddle.rand(self.input_shape)
         view_var = self.view_api_processing(var)
-        # setitem don't support inplace in temporary.
-        # replace setitem with inplace exp_ in temporary.
-        # view_var[0] = 2.
-        view_var.exp_()
+        view_var[0] = 2.
 
         self.assertEqual(var.shape, self.input_shape)
         self.assertEqual(view_var.shape, self.output_shape)
@@ -58,9 +50,8 @@ class TestDygraphViewReuseAllocation(unittest.TestCase):
         self.assertTrue(np.array_equal(var_numpy, view_var_numpy))
 
     def test_view_api(self):
-        if self.flag_test_eager_mode:
-            with _test_eager_guard():
-                self.func_test_view_api()
+        with _test_eager_guard():
+            self.func_test_view_api()
         self.func_test_view_api()
 
     def func_test_forward_version(self):
@@ -69,23 +60,20 @@ class TestDygraphViewReuseAllocation(unittest.TestCase):
         view_var = self.view_api_processing(var)
         self.assertEqual(view_var.inplace_version, 0)
 
-        # var[0] = 2.
-        var.exp_()
+        var[0] = 2.
         self.assertEqual(var.inplace_version, 1)
         self.assertEqual(view_var.inplace_version, 1)
 
         view_var_2 = self.view_api_processing(var)
         self.assertEqual(view_var_2.inplace_version, 1)
 
-        # var[0] = 3.
-        var.exp_()
+        var[0] = 3.
         self.assertEqual(view_var.inplace_version, 2)
         self.assertEqual(view_var_2.inplace_version, 2)
 
     def test_forward_version(self):
-        if self.flag_test_eager_mode:
-            with _test_eager_guard():
-                self.func_test_forward_version()
+        with _test_eager_guard():
+            self.func_test_forward_version()
         self.func_test_forward_version()
 
     def func_test_backward_error(self):
@@ -100,8 +88,7 @@ class TestDygraphViewReuseAllocation(unittest.TestCase):
         # Here, the gradient computation will use the value of var_b
         var_c = var_b**2
         view_var_b = self.view_api_processing(var_b)
-        # view_var_b[0] = 2.  # var_b is modified inplace
-        view_var_b.exp_()
+        view_var_b[0] = 2.  # var_b is modified inplace
         loss = paddle.nn.functional.relu(var_c)
         if in_dygraph_mode():
@@ -118,16 +105,12 @@ class TestDygraphViewReuseAllocation(unittest.TestCase):
                 loss.backward()
 
     def test_backward_error(self):
-        if self.flag_test_eager_mode:
-            with _test_eager_guard():
-                self.func_test_backward_error()
+        with _test_eager_guard():
+            self.func_test_backward_error()
         self.func_test_backward_error()
 
 
 class TestUnsqueezeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation):
-    def set_flag_to_test_eager_mode(self):
-        self.flag_test_eager_mode = False
-
     def init_shape(self):
         self.input_shape = [2, 3]
         self.output_shape = [2, 3, 1]
@@ -137,9 +120,6 @@ class TestUnsqueezeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation):
 
 
 class TestReshapeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation):
-    def set_flag_to_test_eager_mode(self):
-        self.flag_test_eager_mode = True
-
     def init_shape(self):
         self.input_shape = [3, 4]
         self.output_shape = [2, 2, 3]
@@ -149,9 +129,6 @@ class TestReshapeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation):
 
 
 class TestFlattenDygraphViewReuseAllocation(TestDygraphViewReuseAllocation):
-    def set_flag_to_test_eager_mode(self):
-        self.flag_test_eager_mode = False
-
     def init_shape(self):
         self.input_shape = [3, 4]
         self.output_shape = [12]
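
For the backward path, `func_test_backward_error` pins down the failure mode that sharing the inplace version counter must preserve: once a view is written inplace, any tensor whose forward value was saved for gradient computation must refuse to run backward. A hedged sketch of that scenario under the eager guard (the shapes, the `squeeze` view API, and catching `RuntimeError` are illustrative assumptions; the test above asserts the exact version-mismatch message inside the `in_dygraph_mode()` branch):

```python
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    var_a = paddle.rand([2, 3, 1])
    var_a.stop_gradient = False

    var_b = var_a**2
    var_c = var_b**2  # backward of ** needs the saved forward value of var_b
    view_var_b = paddle.squeeze(var_b, -1)
    view_var_b[0] = 2.  # writing through the view modifies var_b inplace

    loss = paddle.nn.functional.relu(var_c)
    try:
        loss.backward()
    except RuntimeError as err:
        # var_b's inplace version no longer matches the snapshot taken when
        # it was saved for backward, so the engine refuses to run.
        print(err)
```
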