diff --git a/doc/fluid/api/fluid.rst b/doc/fluid/api/fluid.rst index 1c07ca0a011d02488f833d6474e19985172508c8..7c93e5787e1d6fc232ec25ef9a7663f8fa5622f9 100644 --- a/doc/fluid/api/fluid.rst +++ b/doc/fluid/api/fluid.rst @@ -20,6 +20,7 @@ fluid fluid/DataFeeder.rst fluid/default_main_program.rst fluid/default_startup_program.rst + fluid/device_guard.rst fluid/DistributeTranspiler.rst fluid/DistributeTranspilerConfig.rst fluid/embedding.rst diff --git a/doc/fluid/api/fluid/device_guard.rst b/doc/fluid/api/fluid/device_guard.rst new file mode 100644 index 0000000000000000000000000000000000000000..1cfdb1f90822b44e60cd04503805eb911be80d45 --- /dev/null +++ b/doc/fluid/api/fluid/device_guard.rst @@ -0,0 +1,11 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +.. _api_fluid_device_guard: + +device_guard +----------------------- + +.. autofunction:: paddle.fluid.device_guard + :noindex: + diff --git a/doc/fluid/api_cn/fluid_cn.rst b/doc/fluid/api_cn/fluid_cn.rst old mode 100644 new mode 100755 index 054f39e772568dee67df2cb37acc0e81efc1857a..94eb2b8c1704ceef53ade3edb9971e81cbb18e89 --- a/doc/fluid/api_cn/fluid_cn.rst +++ b/doc/fluid/api_cn/fluid_cn.rst @@ -23,6 +23,7 @@ fluid fluid_cn/DataFeeder_cn.rst fluid_cn/default_main_program_cn.rst fluid_cn/default_startup_program_cn.rst + fluid_cn/device_guard_cn.rst fluid_cn/DistributeTranspiler_cn.rst fluid_cn/DistributeTranspilerConfig_cn.rst fluid_cn/embedding_cn.rst diff --git a/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst b/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst new file mode 100755 index 0000000000000000000000000000000000000000..7d369cc9740652a4d6d4c5a23ff723fdfa0dbdc4 --- /dev/null +++ b/doc/fluid/api_cn/fluid_cn/device_guard_cn.rst @@ -0,0 +1,36 @@ +.. _cn_api_fluid_device_guard: + +device_guard +------------------------------- + +**注意:该API仅支持【静态图】模式** + +.. 
py:function:: paddle.fluid.device_guard(device=None) + +一个用于指定OP运行设备的上下文管理器。 + +参数: + - **device** (str|None) – 指定上下文中使用的设备。它可以是'cpu'或者'gpu',当它被设置为'cpu'或者'gpu'时,创建在该上下文中的OP将被运行在CPUPlace或者CUDAPlace上。若设置为'gpu',同时程序运行在单卡模式下,设备的索引将与执行器的设备索引保持一致。默认值:None,在该上下文中的OP将被自动地分配设备。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + support_gpu = fluid.is_compiled_with_cuda() + place = fluid.CPUPlace() + if support_gpu: + place = fluid.CUDAPlace(0) + # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0) + data1 = fluid.layers.fill_constant(shape=[1, 3, 8, 8], value=0.5, dtype='float32') + data2 = fluid.layers.fill_constant(shape=[1, 3, 5, 5], value=0.5, dtype='float32') + shape = fluid.layers.shape(data2) + with fluid.device_guard("cpu"): + # Ops created here will be placed on CPUPlace + shape = fluid.layers.slice(shape, axes=[0], starts=[0], ends=[4]) + with fluid.device_guard('gpu'): + # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace + out = fluid.layers.crop_tensor(data1, shape=shape) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + result = exe.run(fetch_list=[out])