From 60d5a912f352dc2b894abefaad6bbd0d3c6d088c Mon Sep 17 00:00:00 2001
From: gouzil <66515297+gouzil@users.noreply.github.com>
Date: Mon, 10 Oct 2022 11:55:21 +0800
Subject: [PATCH] [docs] add ipustrategy Hyperlink (#46422)

* [docs] add ipustrategy Hyperlink
* fix ipu_shard_guard docs; test=document_fix
* [docs] add set_ipu_shard note
* [docs] fix hyperlink
* update framework.py
* fix mlu_places docs; test=document_fix
* fix put_along_axis docs; test=document_fix
* fix flake8 W293 error, test=document_fix
* fix typo in typing, test=document_fix

Co-authored-by: Ligoml <39876205+Ligoml@users.noreply.github.com>
Co-authored-by: Nyakku Shigure
---
 python/paddle/fluid/framework.py     | 47 +++++++++++++++------------
 python/paddle/tensor/manipulation.py |  5 +--
 2 files changed, 29 insertions(+), 23 deletions(-)

diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index e62cb956d98..49c0bb24d6d 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -284,13 +284,13 @@ def ipu_shard_guard(index=-1, stage=-1):
             The sharded model will be computed from small to large. The default value is -1,
             which means no pipelining computation order and run Ops in terms of graph.
 
-    **Note**:
-    Only if the enable_manual_shard=True, the 'index' is able to be set not -1. Please refer
-    to :code:`paddle.static.IpuStrategy` .
-    Only if the enable_pipelining=True, the 'stage' is able to be set not -1. Please refer
-    to :code:`paddle.static.IpuStrategy` .
-    A index is allowed to match none stage or a stage. A stage is only allowed to match a new or
-    duplicated index.
+    Note:
+        Only if enable_manual_shard=True can 'index' be set to a value other than -1. Please refer
+        to :ref:`api_paddle_static_IpuStrategy`.
+        Only if enable_pipelining=True can 'stage' be set to a value other than -1. Please refer
+        to :ref:`api_paddle_static_IpuStrategy`.
+        An index may match no stage or one stage. A stage may only match a new or
+        duplicated index.
 
     Examples:
         .. code-block:: python
@@ -329,6 +329,11 @@ def set_ipu_shard(call_func, index=-1, stage=-1):
     """
     Shard the ipu with the given call function. Set every ops in call function to the given ipu sharding.
 
+    Note:
+        Only when enable_manual_shard=True can the index be set to a value other than -1. Please refer to :ref:`api_paddle_static_IpuStrategy`.
+        Only when enable_pipelining=True can the stage be set to a value other than -1. Please refer to :ref:`api_paddle_static_IpuStrategy`.
+        An index may match no stage or one stage, and a stage may only match a new index or a duplicated index.
+
     Args:
         call_func(Layer|function): Specify the call function to be wrapped.
         index(int, optional): Specify which ipu the Tensor is computed on, (such as ‘0, 1, 2, 3’).
@@ -340,7 +345,6 @@ def set_ipu_shard(call_func, index=-1, stage=-1):
 
     Returns:
         The wrapped call function.
-
     Examples:
         .. code-block:: python
 
@@ -1002,19 +1006,20 @@ def cuda_pinned_places(device_count=None):
 
 def mlu_places(device_ids=None):
     """
-    **Note**:
+    This function creates a list of :code:`paddle.device.MLUPlace` objects.
+    If :code:`device_ids` is None, the environment variable
+    :code:`FLAGS_selected_mlus` would be checked first. For example, if
+    :code:`FLAGS_selected_mlus=0,1,2`, the returned list would
+    be [paddle.device.MLUPlace(0), paddle.device.MLUPlace(1), paddle.device.MLUPlace(2)].
+    If :code:`FLAGS_selected_mlus` is not set, all visible
+    MLU places would be returned.
+    If :code:`device_ids` is not None, it should be the device
+    ids of MLUs. For example, if :code:`device_ids=[0,1,2]`,
+    the returned list would be
+    [paddle.device.MLUPlace(0), paddle.device.MLUPlace(1), paddle.device.MLUPlace(2)].
+
+    Note:
        For multi-card tasks, please use `FLAGS_selected_mlus` environment variable to set the visible MLU device.
-    This function creates a list of :code:`paddle.device.MLUPlace` objects.
-    If :code:`device_ids` is None, environment variable of
-    :code:`FLAGS_selected_mlus` would be checked first. For example, if
-    :code:`FLAGS_selected_mlus=0,1,2`, the returned list would
-    be [paddle.device.MLUPlace(0), paddle.device.MLUPlace(1), paddle.device.MLUPlace(2)].
-    If :code:`FLAGS_selected_mlus` is not set, all visible
-    mlu places would be returned.
-    If :code:`device_ids` is not None, it should be the device
-    ids of MLUs. For example, if :code:`device_ids=[0,1,2]`,
-    the returned list would be
-    [paddle.device.MLUPlace(0), paddle.device.MLUPlace(1), paddle.device.MLUPlace(2)].
 
     Parameters:
         device_ids (list or tuple of int, optional): list of MLU device ids.
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index b2d2e0d17cb..a161734b1df 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -4327,8 +4327,9 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
         indices (Tensor) : Indices to put along each 1d slice of arr. This must match the dimension of arr,
             and need to broadcast against arr. Supported data type are int and int64.
         axis (int) : The axis to put 1d slices along.
-        reduce (string | optinal) : The reduce operation, default is 'assign', support 'add', 'assign', 'mul' and 'multiply'.
-    Returns :
+        reduce (str, optional): The reduce operation, default is 'assign', supports 'add', 'assign', 'mul' and 'multiply'.
+
+    Returns:
         Tensor: The indexed element, same dtype with arr
 
     Examples:
--
GitLab
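
A minimal usage sketch for the ipu_shard_guard hunk above (not part of the patch). It assumes a PaddlePaddle build compiled with IPU support and an IpuStrategy configured with enable_manual_shard=True and enable_pipelining=True; otherwise both index and stage must stay -1:

.. code-block:: python

    import paddle

    paddle.enable_static()
    a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
    with paddle.static.ipu_shard_guard(index=0, stage=0):
        b = a + 1  # ops created here go to IPU 0, pipeline stage 0
    with paddle.static.ipu_shard_guard(index=1, stage=1):
        c = b + 1  # ops created here go to IPU 1, pipeline stage 1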
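Similarly, a hedged sketch of set_ipu_shard, whose note the patch adds; it wraps a whole callable rather than a code region (again assuming an IPU-enabled build):

.. code-block:: python

    import paddle

    paddle.enable_static()
    a = paddle.static.data(name='data', shape=[None, 1], dtype='float32')
    relu = paddle.nn.ReLU()
    # Every op created inside the wrapped call runs on IPU 1, stage 1.
    relu = paddle.static.set_ipu_shard(relu, index=1, stage=1)
    b = relu(a)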
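The reordered mlu_places docstring describes two discovery paths; a short sketch of both, assuming a PaddlePaddle build with MLU support (otherwise the call is unavailable):

.. code-block:: python

    import paddle
    import paddle.static as static

    paddle.enable_static()
    # All visible MLU devices, or those listed in FLAGS_selected_mlus.
    places = static.mlu_places()
    # Explicit device ids are also accepted.
    places = static.mlu_places(device_ids=[0, 1])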
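Finally, a sketch of the put_along_axis reduce parameter that the manipulation.py hunk retypes. Because indices broadcasts against arr, a single index fills the whole selected slice:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[10, 30, 20], [60, 40, 50]])
    index = paddle.to_tensor([[0]])
    # 'assign' overwrites the axis-0 slice selected by index;
    # index broadcasts against x, so every column of row 0 is written.
    out = paddle.put_along_axis(x, index, 99, axis=0, reduce='assign')
    print(out)
    # [[99, 99, 99],
    #  [60, 40, 50]]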