Unverified commit 42e56f65, authored by HongyuJia, committed by GitHub

[PolishComments] Polish some code comments (#46032) (#46261)

* polish code comments

* polish data_device_transform.cc
Parent c43ebfcf
@@ -342,13 +342,12 @@ class OpAttrChecker {
   AttributeMap default_attrs_;
   // in order to improve the efficiency of dynamic graph mode,
-  // we divede the attribute into explicit type and implicit type.
+  // we divide the attribute into explicit type and implicit type.
   // for explicit attribute, we mean the attribute added in the customized
   // op makers, usually it's defined in the overloaded Make method.
   // for implicit attribute, we mean the attribute added outside of the Make
   // method like "op_role", "op_role_var", and they are useless in dynamic
-  // graph
-  // mode
+  // graph mode
   size_t explicit_checker_num_;
 };
...
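The explicit/implicit split this comment describes is observable from Python: framework-managed implicit attributes such as "op_role" show up on static-graph ops next to the explicit ones declared in each op maker. A rough sketch, with the op choice and the inspection purely illustrative, not part of this commit:

    import paddle

    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data('x', [2, 3], 'float32')
        y = paddle.scale(x, scale=2.0)
    # Explicit attrs like 'scale' appear alongside implicit ones like 'op_role'.
    print(main.block(0).ops[-1].attr_names)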
@@ -801,7 +801,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Infer Dtype
   if (infer_dtype_func == nullptr) {
-    // use defalut InferDtype
+    // use default InferDtype
     info.infer_var_type_ = [op_inputs, op_outputs](InferVarTypeContext* ctx) {
       PADDLE_ENFORCE_EQ(
           op_inputs.size(),
...
@@ -51,8 +51,7 @@ void TransDataDevice(const Tensor &in,
   // the elements of learning rate are one and it's CPU side.
   // One solution is to use a CUDA kernel to complete the copy operation when
   // the transforming is from CPU to GPU and the number of elements is little.
-  // But the embarrassment is that this solution this solution makes training
-  // slower.
+  // But the embarrassment is that this solution makes training slower.
   TensorCopySync(in, dst_place, out);
 }
...
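Loosely, the copy discussed in this comment is what happens when a small CPU tensor is moved to the GPU from Python. A hedged illustration, not code from this commit:

    import paddle

    # A tiny tensor, e.g. a learning rate, living on the CPU.
    lr = paddle.to_tensor([0.001], place=paddle.CPUPlace())

    # Crossing devices goes through a synchronous copy (TensorCopySync above),
    # whose fixed overhead dominates when the tensor has so few elements.
    if paddle.device.is_compiled_with_cuda():
        lr_gpu = lr.cuda()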
@@ -682,7 +682,7 @@ class OperatorWithKernel : public OperatorBase {
   * Transfer data from scope to a transferred scope. If there is no data need
   * to be transferred, it returns nullptr.
   *
-  * * transfered_inplace_vars is a output vector.
+  * transfered_inplace_vars is a output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
...
@@ -169,6 +169,7 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
     return vec_res;
   }
+
   std::string GetInputNameByIdx(size_t idx) const override {
     auto& op_proto =
         paddle::framework::OpInfoMap::Instance().Get(op_type_).proto_;
...
@@ -58,7 +58,7 @@ class FillAnyLikeOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "The variable will be filled up with specified value.");
     AddAttr<float>("value", "The filled value").SetDefault(0.0);
     AddAttr<int>("dtype",
-                 "Output tensor data type. defalut value is -1,"
+                 "Output tensor data type. default value is -1,"
                  "according to the input dtype.")
         .SetDefault(-1);
     AddComment(R"DOC(
...
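At the Python level the -1 default corresponds to leaving dtype unset so the output inherits the input's type; paddle.full_like is the user-facing relative of the fill_any_like op. A minimal sketch:

    import paddle

    x = paddle.ones([2, 3], dtype='int32')
    y = paddle.full_like(x, fill_value=7)        # dtype follows x (int32)
    z = paddle.full_like(x, 7, dtype='float32')  # dtype overridden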
@@ -321,8 +321,8 @@ void BindCudaStream(py::module *m_ptr) {
       Parameters:
         enable_timing(bool, optional): Whether the event will measure time. Default: False.
         blocking(bool, optional): Whether the wait() func will be blocking. Default: False;
-        interprocess(bool, optional): Whether the event can be shared between processes. Defalut: False.
+        interprocess(bool, optional): Whether the event can be shared between processes. Default: False.
       Examples:
         .. code-block:: python
...
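The polished docstring matches the keyword arguments the Event constructor takes from Python. A short sketch, guarded because the API requires a CUDA build:

    import paddle

    if paddle.device.is_compiled_with_cuda():
        event = paddle.device.cuda.Event(enable_timing=False,
                                         blocking=False,
                                         interprocess=False)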
@@ -81,7 +81,7 @@ inline phi::DenseTensor TransDataLayout(const phi::DenseTensor& tensor,
 }
 template <typename Context>
-phi::DenseTensor CastDateType(const Context& dev_ctx,
+phi::DenseTensor CastDataType(const Context& dev_ctx,
                               const phi::DenseTensor& tensor,
                               DataType dtype) {
   switch (tensor.dtype()) {
@@ -111,7 +111,7 @@ phi::DenseTensor CastDateType(const Context& dev_ctx,
 }
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-phi::DenseTensor CastDateType(const phi::GPUContext& dev_ctx,
+phi::DenseTensor CastDataType(const phi::GPUContext& dev_ctx,
                               const phi::DenseTensor& tensor,
                               DataType dtype) {
   switch (tensor.dtype()) {
@@ -151,11 +151,11 @@ inline phi::DenseTensor TransDataType(const phi::DenseTensor& tensor,
   if (platform::is_cpu_place(tensor.place())) {
     auto* dev_ctx = static_cast<phi::CPUContext*>(pool.Get(tensor.place()));
-    return CastDateType(*dev_ctx, tensor, dtype);
+    return CastDataType(*dev_ctx, tensor, dtype);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   } else if (platform::is_gpu_place(tensor.place())) {
     auto* dev_ctx = static_cast<phi::GPUContext*>(pool.Get(tensor.place()));
-    return CastDateType(*dev_ctx, tensor, dtype);
+    return CastDataType(*dev_ctx, tensor, dtype);
 #endif
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
...
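The rename only touches these C++ helpers; the behavior they implement, a dtype cast dispatched to a CPU or GPU kernel based on the tensor's place, is roughly what paddle.cast exposes to users. A loose illustration, not code from this commit:

    import paddle

    x = paddle.to_tensor([1, 2, 3], dtype='int32')
    y = paddle.cast(x, 'float64')  # cast kernel chosen from x's place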
@@ -67,7 +67,7 @@ void PutAlongAxisKernel(const Context& dev_ctx,
     PADDLE_THROW(errors::InvalidArgument(
         "can not support reduce: '%s' for scatter kernel, only "
         "support reduce op: 'add', 'assign', 'mul' and 'multiply', the "
-        "defalut reduce "
+        "default reduce "
         "op is 'assign' ",
         reduce));
     return;
...
@@ -68,7 +68,7 @@ void PutAlongAxisKernel(const Context& dev_ctx,
     PADDLE_THROW(errors::InvalidArgument(
         "can not support reduce: '%s' for scatter kernel, only "
         "support reduce op: 'add', 'assign', 'mul' and 'multiply', the "
-        "defalut reduce op is 'assign' ",
+        "default reduce op is 'assign' ",
         reduce));
     return;
   }
...
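The reduce modes named in this message mirror the public paddle.put_along_axis API, whose default is 'assign'. A minimal valid call, with shapes chosen only for illustration:

    import paddle

    arr = paddle.zeros([1, 3])
    idx = paddle.to_tensor([[0, 1, 2]], dtype='int64')
    val = paddle.to_tensor([[10.0, 20.0, 30.0]])
    # 'add', 'mul' and 'multiply' are the other accepted reduce values.
    out = paddle.put_along_axis(arr, idx, val, axis=1, reduce='assign')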
@@ -54,7 +54,7 @@ class DatasetBase(object):
         thread_num(int): thread num, it is the num of readers. default is 1.
         use_var(list): list of variables. Variables which you will use. default is [].
         pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat"
-        input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0.
+        input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
         fs_name(str): fs name. default is "".
         fs_ugi(str): fs ugi. default is "".
         download_cmd(str): customized download command. default is "cat"
@@ -441,7 +441,7 @@ class InMemoryDataset(DatasetBase):
         batch_size(int): batch size. It will be effective during training. default is 1.
         thread_num(int): thread num, it is the num of readers. default is 1.
         use_var(list): list of variables. Variables which you will use. default is [].
-        input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0.
+        input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
         fs_name(str): fs name. default is "".
         fs_ugi(str): fs ugi. default is "".
         pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat"
@@ -522,7 +522,7 @@ class InMemoryDataset(DatasetBase):
         batch_size(int): batch size. It will be effective during training. default is 1.
         thread_num(int): thread num, it is the num of readers. default is 1.
         use_var(list): list of variables. Variables which you will use. default is [].
-        input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. defalut is 0.
+        input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
         fs_name(str): fs name. default is "".
         fs_ugi(str): fs ugi. default is "".
         pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command that can be used only. default is "cat"
...
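These keyword arguments are the ones passed when configuring a dataset. A hedged sketch of typical use, with values mirroring the documented defaults:

    import paddle

    dataset = paddle.distributed.InMemoryDataset()
    dataset.init(batch_size=1,
                 thread_num=1,
                 input_type=0,       # 0: one sample per input; 1: one batch
                 pipe_command="cat",
                 use_var=[])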
@@ -316,7 +316,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True):
         m (int, optional): m of `n:m` sparse pattern. Default is 4.
         mask_algo (string, optional): The function name to generate spase mask. Default is `mask_1d`.
             The vaild inputs should be one of 'mask_1d', 'mask_2d_greedy' and 'mask_2d_best'.
-        with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Defalut is True.
+        with_mask (bool, optional): To prune mask Variables related to parameters or not. Ture is purning also, False is not. Default is True.
     Returns:
         dictionary: A dictionary with key: `parameter name` (string) and value: its corresponding mask Variable.
     Examples:
...
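A minimal call matching the signature in the hunk header; the Linear layer is a stand-in, since ASP pruning applies only to supported layer types, and the import path is assumed for illustration:

    import paddle
    from paddle.incubate import asp

    net = paddle.nn.Linear(64, 32)
    # Returns the {parameter name: mask Variable} dictionary described above.
    masks = asp.prune_model(net, n=2, m=4, mask_algo='mask_1d', with_mask=True)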
@@ -14912,7 +14912,7 @@ def unique_with_counts(x, dtype='int32'):
     Args:
         x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
-        dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Defalut value is int32.
+        dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Default value is int32.
     Returns:
         tuple, the variable type in tuple is Tensor, the output :attr:`out` data type is the same as input :attr:`x`, \
...
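A sketch of the fluid-era call this docstring belongs to, with the sample input invented for illustration:

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
    # out: unique values; index: mapping back to x; count: occurrences per value.
    out, index, count = fluid.layers.unique_with_counts(x)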
@@ -48,7 +48,7 @@ class TestCustomKernelLoad(unittest.TestCase):
         paddle_lib_path = lib_dir
         self.default_path = os.path.sep.join(
             [paddle_lib_path, '..', '..', 'paddle-plugins'])
-        # copy so to defalut path
+        # copy so to default path
         cmd = 'mkdir -p {} && cp ./*.so {}'.format(self.default_path,
                                                    self.default_path)
         os.system(cmd)  # wait
...
@@ -8,11 +8,11 @@
 * `name`: the test's name
 * `os`: The supported operator system, ignoring case. If the test run in multiple operator systems, use ";" to split systems, for example, `apple;linux` means the test runs on both Apple and Linux. The supported values are `linux`,`win32` and `apple`. If the value is empty, this means the test runs on all opertaor systems.
 * `arch`: the device's architecture. similar to `os`, multiple valuse ars splited by ";" and ignoring case. The supported architectures are `gpu`, `xpu`, `ASCEND`, `ASCEND_CL` and `rocm`.
-* `timeout`: timeout of a unittest, whose unit is second. Blank means defalut.
+* `timeout`: timeout of a unittest, whose unit is second. Blank means default.
 * `run_type`: run_type of a unittest. Supported values are `NIGHTLY`, `EXCLUSIVE`, `CINN`, `DIST`, `GPUPS`, `INFER`, `EXCLUSIVE:NIGHTLY`, `DIST:NIGHTLY`,which are case-insensitive.
 * `launcher`: the test launcher.Supported values are test_runner.py, dist_test.sh and custom scripts' name. Blank means test_runner.py.
 * `num_port`: the number of port used in a distributed unit test. Blank means automatically distributed port.
-* `run_serial`: whether in serial mode. the value can be 1 or 0.Default (empty) is 0. Blank means defalut.
+* `run_serial`: whether in serial mode. the value can be 1 or 0.Default (empty) is 0. Blank means default.
 * `ENVS`: required environments. multiple envirenmonts are splited by ";".
 * `conditions`: extra required conditions for some tests. The value is a list of boolean expression in cmake programmer, splited with ";". For example, the value can be `WITH_DGC;NOT WITH_NCCL` or `WITH_NCCL;${NCCL_VERSION} VERSION_GREATER_EQUAL 2212`,The relationship between these expressions is a conjunction.
...
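For concreteness, a hypothetical row in this format, with every field value invented for illustration and laid out in the column order listed above:

    name,os,arch,timeout,run_type,launcher,num_port,run_serial,ENVS,conditions
    test_foo_op,linux;win32,gpu,120,DIST,dist_test.sh,2,1,FLAGS_foo=1,WITH_NCCL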
@@ -78,7 +78,7 @@ class BatchNorm(paddle.nn.BatchNorm1D):
         If it is set to None or one attribute of ParamAttr, batch_norm
         will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
         If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
...
@@ -140,8 +140,8 @@ def batch_norm(x,
         bias(Tensor): The bias tensor of batch_norm can not be None.
         epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
-        training(bool, optional): True means train mode which compute by batch data and track global mean and var during train period. False means inference mode which compute by global mean and var which calculated by train period. Defalut False.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Defalut "NCHW".
+        training(bool, optional): True means train mode which compute by batch data and track global mean and var during train period. False means inference mode which compute by global mean and var which calculated by train period. Default False.
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default "NCHW".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
@@ -392,7 +392,7 @@ def instance_norm(x,
         eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
         use_input_stats(bool): Default True.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Defalut "NCHW".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW".
         name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
     Returns:
...
@@ -144,7 +144,7 @@ class InstanceNorm1D(_InstanceNormBase):
         will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
         If the Initializer of the bias_attr is not set, the bias is initialized zero.
         If it is set to False, will not create bias_attr. Default: None.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL". Defalut "NCL".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL". Default "NCL".
         name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
@@ -743,7 +743,7 @@ class BatchNorm1D(_BatchNormBase):
         If it is set to None or one attribute of ParamAttr, batch_norm
         will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
         If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
-        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL".
+        data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Default "NCL".
         use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
...
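The data_format strings above describe axis order. A short sketch contrasting the two 1-D layouts, with shapes chosen for illustration:

    import paddle

    x_ncl = paddle.rand([4, 8, 16])  # batch N, channels C, length L
    bn = paddle.nn.BatchNorm1D(8, data_format='NCL')
    y = bn(x_ncl)

    x_nlc = paddle.rand([4, 16, 8])  # channels-last layout of the same data
    bn_nlc = paddle.nn.BatchNorm1D(8, data_format='NLC')
    y_nlc = bn_nlc(x_nlc)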
@@ -276,7 +276,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
             If `axis < 0`, the dimension to norm operation is rank(input) + axis.
             If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.
-            Defalut value is `None`.
+            Default value is `None`.
         keepdim (bool, optional): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have fewer dimension
             than the :attr:`input` unless :attr:`keepdim` is true, default
@@ -2589,7 +2589,7 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             True.
         rcond(Tensor, optional): the tolerance value to determine
-            when is a singular value zero. Defalut:1e-15.
+            when is a singular value zero. Default:1e-15.
         hermitian(bool, optional): indicates whether x is Hermitian
             if complex or symmetric if real. Default: False.
...
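Both functions in these hunks are callable as documented. A minimal sketch:

    import paddle

    x = paddle.rand([3, 4])
    fro = paddle.linalg.norm(x, p='fro')  # matrix Frobenius norm over x
    # rcond is the singular-value cutoff documented above (default 1e-15).
    x_pinv = paddle.linalg.pinv(x, rcond=1e-15, hermitian=False)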