未验证 提交 1262e787 编写于 作者: M Meteor Liu 提交者: GitHub

[enhancement] Implement tensor.cpu() in static graph (#55921)

* [enhancement] Implement tensor.cpu() in static graph

* [enhancement] Implement tensor.cpu() in static graph

* [dy2static] add unitest cases for tensor.cpu()

* [dy2static] add unitest cases for tensor.cpu() - run only on paddle compiled with cuda mode

* [dy2static]Refine the format of the api-doc for Variable.cpu().
上级 28b8adb1
@@ -136,11 +136,41 @@ def monkey_patch_variable():
@static_only
def cpu(self):
    """
    In dy2static, Variable also needs a cpu() interface to mirror the
    dygraph Tensor API. The underlying ``memcpy`` operator only has a
    forward op (no backward), so the result stops gradient.

    Returns:
        A new Variable holding a copy of this tensor on CPU place.

    Examples:
        In Static Graph Mode:

        .. code-block:: python

            import paddle
            paddle.enable_static()

            x = paddle.static.data(name="x", shape=[2, 2], dtype='float32')
            y = x.cpu()
    """
    cur_block = current_block(self)
    # Destination variable mirrors this one; gradients never flow through
    # the copy because memcpy has no backward op.
    out_var = cur_block.create_var(
        name=unique_tmp_name(),
        dtype=self.dtype,
        shape=self.shape,
        type=self.type,
        persistable=False,
        stop_gradient=True,
    )
    # dst_place_type 0 selects CPU place, see paddle/fluid/operators/memcpy_op.h
    cur_block.append_op(
        type='memcpy',
        inputs={'X': [self]},
        outputs={'Out': [out_var]},
        attrs={'dst_place_type': 0},
    )
    return out_var
@static_only @static_only
def cuda(self): def cuda(self):
......
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
@paddle.jit.to_static
def tensor_copy_to_cpu(x):
    """Convert ``x`` to a tensor, then return a copy placed on CPU."""
    tensor = paddle.to_tensor(x)
    return tensor.cpu()
class TestTensorCopyToCpuOnDefaultCPU(unittest.TestCase):
    """Check ``Tensor.cpu()`` under dy2static when the default place is CPU."""

    def _run(self, to_static):
        # Toggle dynamic-to-static translation, then run the helper once.
        paddle.jit.enable_to_static(to_static)
        src = paddle.ones([1, 2, 3])
        dst = tensor_copy_to_cpu(src)
        return src.place, dst.place, dst.numpy()

    def test_tensor_cpu_on_default_cpu(self):
        paddle.fluid.framework._set_expected_place(paddle.CPUPlace())
        dy_src_place, dy_dst_place, dy_res = self._run(to_static=False)
        st_src_place, st_dst_place, st_res = self._run(to_static=True)
        # Dygraph and static results must agree numerically.
        np.testing.assert_allclose(dy_res, st_res, rtol=1e-05)
        # Everything stays on CPU when the default place is CPU.
        for place in (dy_src_place, st_src_place, dy_dst_place, st_dst_place):
            self.assertTrue(place.is_cpu_place())


if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import paddle
@paddle.jit.to_static
def tensor_copy_to_cpu(x):
    """Materialize ``x`` as a tensor and copy it to CPU place."""
    as_tensor = paddle.to_tensor(x)
    moved = as_tensor.cpu()
    return moved
class TestTensorCopyToCpuOnDefaultGPU(unittest.TestCase):
    """Check ``Tensor.cpu()`` under dy2static when the default place is GPU."""

    def _run(self, to_static):
        """Run ``tensor_copy_to_cpu`` once.

        Returns:
            Tuple of (input place, output place, output values as ndarray).
        """
        paddle.jit.enable_to_static(to_static)
        x1 = paddle.ones([1, 2, 3])
        x2 = tensor_copy_to_cpu(x1)
        return x1.place, x2.place, x2.numpy()

    def test_tensor_cpu_on_default_gpu(self):
        # Skip explicitly (rather than silently returning, which would be
        # reported as PASSED) when paddle is built without CUDA support.
        if not paddle.fluid.is_compiled_with_cuda():
            self.skipTest('paddle is not compiled with CUDA')
        place = paddle.CUDAPlace(
            int(os.environ.get('FLAGS_selected_gpus', 0))
        )
        paddle.fluid.framework._set_expected_place(place)
        dygraph_x1_place, dygraph_place, dygraph_res = self._run(
            to_static=False
        )
        static_x1_place, static_place, static_res = self._run(to_static=True)
        # Dygraph and static results must agree numerically.
        np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
        # Inputs were created on GPU; cpu() must land the output on CPU
        # in both dygraph and static modes.
        self.assertTrue(dygraph_x1_place.is_gpu_place())
        self.assertTrue(static_x1_place.is_gpu_place())
        self.assertTrue(dygraph_place.is_cpu_place())
        self.assertTrue(static_place.is_cpu_place())


if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册