Unverified commit c36a000d authored by cyberslack_lee, committed by GitHub

fix typos (#53967)

Parent 35f1a89e
......@@ -166,7 +166,7 @@ class GeneralGrad {
} // TODO(jiabin): May we need some check here.
}
// Get Graph Info Betweent input target GradNode and outputs
// Get Graph Info Betweent input target GradNode and outputs,
// record depending_nodes_
void GetGraphInfoBetweenTargets(const std::deque<GradNodeBase*>& init_queue) {
VLOG(6) << "Runing In GetGraphInfoBetweenTargets";
......
......@@ -373,7 +373,7 @@ class DownpourWorker : public HogwildWorker {
// std::vector<std::pair<uint64_t, uint64_t>> copy_dense_tables_;
};
// Based on DownpourWorkerremove push pull code into operator
// Based on DownpourWorker, remove push pull code into operator
#if defined(PADDLE_WITH_PSCORE)
class DownpourLiteWorker : public HogwildWorker {
public:
......
......@@ -189,7 +189,7 @@ void MultiDevSSAGraphBuilderBase::Init() const {
platform::errors::InvalidArgument(
"Places size and LocalScopes not equal "
"Places size(%d), LocalScopes size(%d) "
"If use multi devices Places size must equas to LocalScopes size.",
"If use multi devices, Places size must equas to LocalScopes size.",
places_.size(),
local_scopes_.size()));
}
......@@ -875,7 +875,7 @@ size_t BalanceVarSSAGraphBuilder::GetAppropriateDeviceID(
0,
platform::errors::InvalidArgument(
"The numel of Var(%s) must greater than 0"
"Please check your codeabout Var(%s) Shape.",
"Please check your code, about Var(%s) Shape.",
var_name,
var_name));
numel_sum += numel;
......
......@@ -468,7 +468,7 @@ SquaredMatSubFusePass::SquaredMatSubFusePass() {
.End()
.AddAttr("shape")
.End()
// type:floatthere is no restriction
// type:float, there is no restriction
.AddAttr("value")
.End()
.AddAttr("str_value")
......
......@@ -149,7 +149,7 @@ int FindMapByValue(const std::map<int, int>& m, int val) {
return -1;
}
// In other two casesthe op that has feed vars as output vars is dependent:
// In other two cases, the op that has feed vars as output vars is dependent:
// 1. op has subblock, like while/for/ifelse/recurrent
// 2. op is in subblock
bool IsSubBlockDependent(const proto::OpDesc& op_desc,
......
......@@ -693,7 +693,7 @@ std::string TensorRtSubgraphPass::CreateTensorRTOp(
}
}
// If with_dynamic_shape is configuredbut min_input_shape is empty,
// If with_dynamic_shape is configured, but min_input_shape is empty,
// create trt engine in runtime instead of in pass.
if (with_dynamic_shape && min_input_shape.empty()) {
return engine_key + std::to_string(predictor_id);
......
......@@ -358,7 +358,7 @@ void MemoryOptimizePass::RunImpl(Argument* argument) {
// mapping table.
if (!argument->enable_memory_optim()) return;
// Because of pass is a singleton, graph can not be member
// variablesotherwise, errors will be caused under multithreading
// variables, otherwise, errors will be caused under multithreading
// conditions.
auto graph = argument->main_graph_ptr();
......
......@@ -2863,7 +2863,7 @@ Predictor::Predictor(const Config &config) {
"and it falls back to use Paddle Inference.";
} else if (!paddle::CheckConvertToONNX(config)) {
LOG(WARNING)
<< "Paddle2ONNX do't support convert the Model fall back to using "
<< "Paddle2ONNX do't support convert the Model, fall back to using "
"Paddle Inference.";
} else {
predictor_ =
......
......@@ -155,7 +155,7 @@ inline int round_up(int seq_len, int multiple = 32) {
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive numberbut it's (%d)", multiple));
"multiple should be a positive number, but it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
......
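The rounding logic in this hunk (and its duplicates below) is the usual ceiling-division idiom. As a minimal illustrative sketch in plain Python, not Paddle code:

```python
def round_up(seq_len: int, multiple: int = 32) -> int:
    # Round seq_len up to the nearest multiple of `multiple`,
    # e.g. round_up(33, 32) == 64; `multiple` must be positive.
    assert multiple > 0, f"multiple should be a positive number, but it's ({multiple})"
    return ((seq_len + multiple - 1) // multiple) * multiple

assert round_up(1) == 32 and round_up(32) == 32 and round_up(33) == 64
```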
......@@ -41,7 +41,7 @@ inline int round_up(int seq_len, int multiple = 32) {
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive numberbut it's (%d)", multiple));
"multiple should be a positive number, but it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
......
......@@ -43,7 +43,7 @@ class FeedForward {
T* output_data,
T* bias_out_data) {
// Note: for blas.GEMM API in Paddle, it treats all inputs as row-major.
// To convert to col-major expression, transa<->transb, A<->Bm<->n.
// To convert to col-major expression, transa<->transb, A<->B, m<->n.
// column-major: gemm-tn.
CBLAS_TRANSPOSE transA = CblasNoTrans;
......
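The swap described in the comment works because a row-major buffer reinterpreted as column-major is the transpose, and (A·B)ᵀ = Bᵀ·Aᵀ. A small numpy check of that identity (illustration only, not Paddle's blas API):

```python
import numpy as np

m, k, n = 2, 3, 4
A = np.arange(m * k, dtype=np.float32).reshape(m, k)  # row-major (m x k)
B = np.arange(k * n, dtype=np.float32).reshape(k, n)  # row-major (k x n)

# A col-major gemm fed B-then-A with transa<->transb and m<->n swapped
# computes B^T @ A^T = (A @ B)^T, whose col-major buffer is exactly the
# row-major buffer of A @ B.
assert np.allclose((B.T @ A.T).T, A @ B)
```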
......@@ -396,7 +396,7 @@ class FusedMultiTransformerOpKernel : public framework::OpKernel<T> {
dim_head,
compute_bias);
// q_transpose_out_data [bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2 bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2, bs, head_num, seq_len, dim_head]
if (rotary_emb_dims != 0) {
auto *rotary_emb_data = rotary_tensor->data<T>();
const int *sequence_lengths_data =
......@@ -483,7 +483,7 @@ class FusedMultiTransformerOpKernel : public framework::OpKernel<T> {
compute_bias);
// q_transpose_out_data [bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2 bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2, bs, head_num, seq_len, dim_head]
if (rotary_emb_dims != 0) {
auto *rotary_emb_data = rotary_tensor->data<T>();
const int *sequence_lengths_data =
......@@ -1071,7 +1071,7 @@ class FusedMultiTransformerOpKernel : public framework::OpKernel<T> {
compute_bias);
// q_transpose_out_data [bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2 bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2, bs, head_num, seq_len, dim_head]
if (rotary_emb_dims != 0) {
auto *rotary_emb_data = rotary_tensor->data<T>();
const int *sequence_lengths_data =
......@@ -1158,7 +1158,7 @@ class FusedMultiTransformerOpKernel : public framework::OpKernel<T> {
compute_bias);
// q_transpose_out_data [bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2 bs, head_num, seq_len, dim_head]
// kv_transpose_out_data [2, bs, head_num, seq_len, dim_head]
if (rotary_emb_dims != 0) {
auto *rotary_emb_data = rotary_tensor->data<T>();
const int *sequence_lengths_data =
......
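The shapes in these comments come from splitting the fused qkv transpose output along its leading axis. A shape-only sketch with hypothetical sizes (the variable names and dimensions here are illustrative, not taken from the kernel):

```python
import numpy as np

bs, head_num, seq_len, dim_head = 2, 8, 16, 64  # hypothetical sizes
qkv_transpose_out = np.empty((3, bs, head_num, seq_len, dim_head), np.float32)

q_transpose_out = qkv_transpose_out[0]    # [bs, head_num, seq_len, dim_head]
kv_transpose_out = qkv_transpose_out[1:]  # [2, bs, head_num, seq_len, dim_head]
assert kv_transpose_out.shape == (2, bs, head_num, seq_len, dim_head)
```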
......@@ -241,7 +241,7 @@ inline int round_up(int seq_len, int multiple = 32) {
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive numberbut it's (%d)", multiple));
"multiple should be a positive number, but it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
......
......@@ -471,7 +471,7 @@ struct DeviceIndependenceTensorOperations {
NameInTensorMap inputs({{"X", {&x}}});
return CreateOpRunAndReturnTensor("reduce_max", inputs, attrs, out_dim);
}
// Support float and complex type subtractionthe default is T type
// Support float and complex type subtraction, the default is T type
template <typename InT = T>
phi::DenseTensor Sub(const phi::DenseTensor& x, const phi::DenseTensor& y) {
phi::DenseTensor ret;
......
......@@ -288,7 +288,7 @@ static void InitVarBaseFromTensorWithArgDefault(imperative::VarBase *self,
self->SetType(framework::proto::VarType::LOD_TENSOR);
self->SetDataType(framework::TransToProtoVarType(tensor.dtype()));
auto *new_tensor = self->MutableVar()->GetMutable<phi::DenseTensor>();
// Same placeshare data directly
// Same place, share data directly
if (place == tensor.place()) {
new_tensor->ShareDataWith(tensor);
VLOG(4) << "Same place, do ShareDataWith";
......@@ -312,7 +312,7 @@ static void InitVarBaseFromTensorWithArg(imperative::VarBase *self,
self->SetType(framework::proto::VarType::LOD_TENSOR);
self->SetDataType(framework::TransToProtoVarType(tensor.dtype()));
auto *new_tensor = self->MutableVar()->GetMutable<phi::DenseTensor>();
// Same placeshare data directly
// Same place, share data directly
if (platform::is_same_place(place, tensor.place())) {
new_tensor->ShareDataWith(tensor);
VLOG(4) << "Same place, do ShareDataWith";
......
......@@ -34,8 +34,8 @@ using Deleter = std::function<void(void*)>;
* `data`. See PD_FOR_EACH_DATA_TYPE in `phi/common/data_type.h`
* @param layout The data layout of the tensor.
* @param place The place where the tensor is located.
* If `place` is default value, it will be inferred from `data`
* Howeverthe feature is only supported on CPU or GPU.
* If `place` is default value, it will be inferred from `data`,
* However, the feature is only supported on CPU or GPU.
* If `place` is not default value, make sure that `place` is equal
* to the place of `data`
* @param deleter A function or function object that will be called to free the
......
......@@ -144,7 +144,7 @@ struct PADDLE_ALIGN(2) bfloat16 {
return *this;
}
// Conversion opertors
// Conversion operators
HOSTDEVICE inline operator float() const {
#ifdef PADDLE_WITH_HIP
uint32_t res = 0;
......
......@@ -215,7 +215,7 @@ struct PADDLE_ALIGN(2) float16 {
return *this;
}
// Conversion opertors
// Conversion operators
#ifdef PADDLE_CUDA_FP16
HOSTDEVICE inline half to_half() const {
#if defined(PADDLE_WITH_HIP) || CUDA_VERSION >= 9000
......
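Both "Conversion operators" hunks above widen a 16-bit payload back to float32. For bfloat16 the conversion is a plain 16-bit left shift, since bfloat16 keeps the top half of an IEEE float32; a hedged numpy sketch of that bit trick:

```python
import numpy as np

def bf16_bits_to_float(bits: int) -> float:
    # bfloat16 stores the high 16 bits of a float32, so converting back
    # to float just shifts the payload into the top half of a uint32.
    res = np.array([bits << 16], dtype=np.uint32)
    return float(res.view(np.float32)[0])

# 0x3F80 is bfloat16 1.0 (the top bits of float32 1.0 = 0x3F800000).
assert bf16_bits_to_float(0x3F80) == 1.0
```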
......@@ -35,7 +35,7 @@ void FlattenInferKernel(const Context& dev_ctx,
}
// TODO(yuanrisheng): this kernel is for training and xshape is a Intermediate
// Output Tensor
// Output Tensor,
// is there a more flexible way to deal with this case?
template <typename T, typename Context>
void FlattenKernel(const Context& dev_ctx,
......
......@@ -1099,7 +1099,7 @@ void ln_bwd_fast_kernel_driver(const phi::GPUContext &dev_ctx,
WARPS_M_2 * THREADS_PER_ROW_2; // 16 * 32 = 512
const int ROWS_PER_CTA_2 = WARPS_M_2; // 16
// #blocks: 32#threads_per_block: 512
// #blocks: 32, #threads_per_block: 512
// Note: it is not supported for double type.
if (sizeof(U) > 4) {
PADDLE_THROW(
......
......@@ -74,7 +74,7 @@ struct ExpAddFunctor {
/*
Cross entropy soft label with dynamic size on axis (log2_elements is
varibale).
- if the input is softmaxcompute loss with softmax
- if the input is softmax, compute loss with softmax
- if the input is log_softmax, compute loss with log_softmax and update
softmax
*/
......
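Both branches in this comment compute the same quantity, loss = -Σ label · log(softmax); the difference is only which intermediate the kernel receives. A numpy sketch of the equivalence:

```python
import numpy as np

logits = np.array([[1.0, 2.0, 3.0]])
label = np.array([[0.1, 0.2, 0.7]])  # soft label, rows sum to 1

log_softmax = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
softmax = np.exp(log_softmax)

loss_from_softmax = -(label * np.log(softmax)).sum(-1)   # input is softmax
loss_from_log_softmax = -(label * log_softmax).sum(-1)   # input is log_softmax
assert np.allclose(loss_from_softmax, loss_from_log_softmax)
```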
......@@ -178,7 +178,7 @@ def _prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker):
Args:
gate_idx (Tensor): Represents the gate_id sequence corresponding to the input data with type int32, int64.
expert_count (Tensor): The quantity value counted on the gate_id sequence of the input data with type int32, int64.
n_worker(intoptional): The number of workers on the trainer with type int64.
n_worker(int, optional): The number of workers on the trainer with type int64.
Returns:
new_gate_idx (Tensor): The gate_id sequence corresponding to the new input data after passing through prune.
......
......@@ -1075,78 +1075,78 @@ class FusedMultiTransformer(Layer):
ln_scale_attrs(ParamAttr|list|tuple, optional): To specify the weight parameter property
for Attention layer_norm. For Attention layer_norm weight, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. Default: None, which means the default weight
parameter property is used. See usage for details in :code:`ParamAttr`.
ln_bias_attrs(ParamAttr|list|tuple|bool, optional): To specify the bias parameter property
for Attention layer_norm. For Attention layer_norm bias, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. The `False` value means the corresponding layer would
not have trainable bias parameter. Default: None, which means the default bias
parameter property is used. See usage for details in :code:`ParamAttr`.
qkv_weight_attrs(ParamAttr|list|tuple, optional): To specify the weight parameter property
for Attention qkv computation. For Attention qkv weight, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. Default: None, which means the default weight
parameter property is used. See usage for details in :code:`ParamAttr`.
qkv_bias_attrs(ParamAttr|list|tuple|bool, optional): To specify the bias parameter property
for Attention qkv computation. For Attention qkv bias, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. The `False` value means the corresponding layer would
not have trainable bias parameter. Default: None, which means the default bias
parameter property is used. See usage for details in :code:`ParamAttr`.
linear_weight_attrs(ParamAttr|list|tuple, optional): To specify the weight parameter property
for Attention linear. For Attention linear weight, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. Default: None, which means the default weight
parameter property is used. See usage for details in :code:`ParamAttr`.
linear_bias_attrs(ParamAttr|list|tuple|bool, optional): To specify the bias parameter property
for Attention linear computation. For Attention linear bias, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. The `False` value means the corresponding layer would
not have trainable bias parameter. Default: None, which means the default bias
parameter property is used. See usage for details in :code:`ParamAttr`.
ffn_ln_scale_attrs(ParamAttr|list|tuple, optional): To specify the weight parameter property
for FFN layer_norm. For FFN layer_norm weight, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. Default: None, which means the default weight
parameter property is used. See usage for details in :code:`ParamAttr`.
ffn_ln_bias_attrs(ParamAttr|list|tuple|bool, optional): To specify the bias parameter property
for FFN layer_norm. For FFN layer_norm bias, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. The `False` value means the corresponding layer would
not have trainable bias parameter. Default: None, which means the default bias
parameter property is used. See usage for details in :code:`ParamAttr`.
ffn1_weight_attrs(ParamAttr|list|tuple, optional): To specify the weight parameter property
for FFN first linear. For FFN first linear weight, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. Default: None, which means the default weight
parameter property is used. See usage for details in :code:`ParamAttr`.
ffn1_bias_attrs(ParamAttr|list|tuple|bool, optional): To specify the bias parameter property
for FFN first linear. For FFN first linear bias, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. The `False` value means the corresponding layer would
not have trainable bias parameter. Default: None, which means the default bias
parameter property is used. See usage for details in :code:`ParamAttr`.
ffn2_weight_attrs(ParamAttr|list|tuple, optional): To specify the weight parameter property
for FFN second linear. For FFN second linear weight, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. Default: None, which means the default weight
parameter property is used. See usage for details in :code:`ParamAttr`.
ffn2_bias_attrs(ParamAttr|list|tuple|bool, optional): To specify the bias parameter property
for FFN second linear. For FFN second linear bias, if it is a list/tuple, `attrs[0]`
would be used as `attr` for transformer layer 0, and `attrs[1]` would be used as
`attr` for transformer layer 1etc. Otherwise, all layers both use it as
`attr` for transformer layer 1, etc. Otherwise, all layers both use it as
`attr` to create parameters. The `False` value means the corresponding layer would
not have trainable bias parameter. Default: None, which means the default bias
parameter property is used. See usage for details in :code:`ParamAttr`.
......
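All of the `*_attrs` arguments in this docstring follow the same per-layer convention. A hedged sketch of that lookup (the helper name is illustrative, not necessarily Paddle's internal one):

```python
def _resolve_attr(attrs, layer_idx):
    # A list/tuple is indexed per layer: attrs[0] for transformer layer 0,
    # attrs[1] for layer 1, etc. Any other value is shared by all layers.
    if isinstance(attrs, (list, tuple)):
        return attrs[layer_idx]
    return attrs
```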
......@@ -1925,7 +1925,7 @@ def rnnt_loss(
to compute Sequence Transduction with Recurrent Neural Networks (RNN-T) loss.
Parameters:
input (Tensor): The logprobs sequence with padding, which is a 4-D Tensor. The tensor shape is [B, Tmax, Umax, D], where Tmax, is the longest length of input logit sequence. The data type should be float32 or float64.
input (Tensor): The logprobs sequence with padding, which is a 4-D Tensor. The tensor shape is [B, Tmax, Umax, D], where Tmax is the longest length of input logit sequence. The data type should be float32 or float64.
label (Tensor): The ground truth sequence with padding, which must be a 2-D Tensor. The tensor shape is [B, Umax], where Umax is the longest length of label sequence. The data type must be int32.
input_lengths (Tensor): The length for each input sequence, it should have shape [batch_size] and dtype int64.
label_lengths (Tensor): The length for each label sequence, it should have shape [batch_size] and dtype int64.
......
......@@ -4599,8 +4599,8 @@ def gcd(x, y, name=None):
If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output).
Args:
x (Tensor): An N-D Tensor, the data type is int32int64.
y (Tensor): An N-D Tensor, the data type is int32int64.
x (Tensor): An N-D Tensor, the data type is int32, int64.
y (Tensor): An N-D Tensor, the data type is int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
......@@ -4684,8 +4684,8 @@ def lcm(x, y, name=None):
If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output).
Args:
x (Tensor): An N-D Tensor, the data type is int32int64.
y (Tensor): An N-D Tensor, the data type is int32int64.
x (Tensor): An N-D Tensor, the data type is int32, int64.
y (Tensor): An N-D Tensor, the data type is int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
......
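A minimal usage sketch consistent with the `gcd`/`lcm` signatures above (expected values computed by hand):

```python
import paddle

x = paddle.to_tensor([12, 20], dtype='int64')
y = paddle.to_tensor([20, 8], dtype='int64')
print(paddle.gcd(x, y))  # [4, 4]
print(paddle.lcm(x, y))  # [60, 40]
```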
......@@ -39,12 +39,12 @@ def argsort(x, axis=-1, descending=False, name=None):
Sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
Args:
x(Tensor): An input N-D Tensor with type float16, float32, float64, int16,
x (Tensor): An input N-D Tensor with type float16, float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(x). when axis<0, it works the same way
as axis+R. Default is -1.
descending(bool, optional) : Descending is a flag, if set to true,
descending (bool, optional) : Descending is a flag, if set to true,
algorithm will sort by descending order, else sort by
ascending order. Default is false.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
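A small sketch of the documented behavior, with hand-computed indices:

```python
import paddle

x = paddle.to_tensor([3, 1, 2], dtype='int64')
print(paddle.argsort(x))                   # [1, 2, 0], ascending by default
print(paddle.argsort(x, descending=True))  # [0, 2, 1]
```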
......@@ -135,13 +135,13 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
element along the provided axis.
Args:
x(Tensor): An input N-D Tensor with type float16, float32, float64, int16,
x (Tensor): An input N-D Tensor with type float16, float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is x.ndim. when axis < 0, it works the same way
as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
dtype(str|np.dtype, optional): Data type of the output tensor which can
keepdim (bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
dtype (str|np.dtype, optional): Data type of the output tensor which can
be int32, int64. The default value is ``int64`` , and it will
return the int64 indices.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
......@@ -225,16 +225,16 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
element along the provided axis.
Args:
x(Tensor): An input N-D Tensor with type float16, float32, float64, int16,
x (Tensor): An input N-D Tensor with type float16, float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is x.ndim. when axis < 0, it works the same way
as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
dtype(str, optional): Data type of the output tensor which can
keepdim (bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
dtype (str, optional): Data type of the output tensor which can
be int32, int64. The default value is 'int64', and it will
return the int64 indices.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.
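A usage sketch covering both `argmax` and `argmin` as documented above (expected values computed by hand):

```python
import paddle

x = paddle.to_tensor([[5, 2, 9],
                      [1, 8, 3]], dtype='int64')
print(paddle.argmax(x, axis=1))                # [2, 1]
print(paddle.argmin(x, axis=1, keepdim=True))  # [[1], [0]]
print(paddle.argmax(x))                        # 2: x is flattened when axis is None
```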
......@@ -321,7 +321,7 @@ def index_select(x, index, axis=0, name=None):
x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float16, float32, float64, int32, int64.
index (Tensor): The 1-D Tensor containing the indices to index. The data type of ``index`` must be int32 or int64.
axis (int, optional): The dimension in which we index. Default: if None, the ``axis`` is 0.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
Tensor: A Tensor with same data type as ``x``.
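For example, selecting rows along the default `axis=0`:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
index = paddle.to_tensor([2, 0], dtype='int64')
print(paddle.index_select(x, index))  # rows 2 and 0: [[5., 6.], [1., 2.]]
```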
......@@ -476,12 +476,12 @@ def sort(x, axis=-1, descending=False, name=None):
Sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
Args:
x(Tensor): An input N-D Tensor with type float32, float64, int16,
x (Tensor): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(x). when axis<0, it works the same way
as axis+R. Default is -1.
descending(bool, optional) : Descending is a flag, if set to true,
descending (bool, optional) : Descending is a flag, if set to true,
algorithm will sort by descending order, else sort by
ascending order. Default is false.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
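A minimal usage sketch of the documented behavior:

```python
import paddle

x = paddle.to_tensor([3.0, 1.0, 2.0])
print(paddle.sort(x))                   # [1., 2., 3.]
print(paddle.sort(x, descending=True))  # [3., 2., 1.]
```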
......@@ -551,15 +551,15 @@ def mode(x, axis=-1, keepdim=False, name=None):
Used to find values and indices of the modes at the optional axis.
Args:
x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
axis(int, optional): Axis to compute indices along. The effective range
x (Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is x.ndim. when axis < 0, it works the same way
as axis + R. Default is -1.
keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
keepdim (bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.
tuple (Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.
Examples:
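A small usage sketch consistent with the documented signature:

```python
import paddle

x = paddle.to_tensor([[2.0, 2.0, 3.0],
                      [5.0, 5.0, 1.0]])
values, indices = paddle.mode(x)  # along the last axis by default
print(values)   # [2., 5.], the most frequent value in each row
# indices holds the positions of those modes, with dtype int64
```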
......@@ -815,7 +815,7 @@ def masked_select(x, mask, name=None):
Args:
x (Tensor): The input Tensor, the data type can be int32, int64, uint16, float16, float32, float64.
mask (Tensor): The Tensor containing the binary mask to index with, it's data type is bool.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
A 1-D Tensor which is the same data type as ``x``.
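For example, with a boolean mask built from a comparison:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
mask = x > 2.0
print(paddle.masked_select(x, mask))  # [3., 4.], always a 1-D Tensor
```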
......@@ -866,15 +866,15 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.
Args:
x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
k(int, Tensor): The number of top elements to look for along the axis.
axis(int, optional): Axis to compute indices along. The effective range
x (Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
k (int, Tensor): The number of top elements to look for along the axis.
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is x.ndim. when axis < 0, it works the same way
as axis + R. Default is -1.
largest(bool, optional) : largest is a flag, if set to true,
largest (bool, optional) : largest is a flag, if set to true,
algorithm will sort by descending order, otherwise sort by
ascending order. Default is True.
sorted(bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value.
sorted (bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
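A usage sketch of the documented behavior, including the `largest` flag:

```python
import paddle

x = paddle.to_tensor([1.0, 4.0, 5.0, 7.0])
values, indices = paddle.topk(x, k=2)
print(values, indices)  # [7., 5.], [3, 2]
values, indices = paddle.topk(x, k=2, largest=False)
print(values, indices)  # [1., 4.], [0, 1]
```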
......@@ -943,15 +943,15 @@ def bucketize(x, sorted_sequence, out_int32=False, right=False, name=None):
This API is used to find the index of the corresponding 1D tensor `sorted_sequence` in the innermost dimension based on the given `x`.
Args:
x(Tensor): An input N-D tensor value with type int32, int64, float32, float64.
sorted_sequence(Tensor): An input 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `x`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
x (Tensor): An input N-D tensor value with type int32, int64, float32, float64.
sorted_sequence (Tensor): An input 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
out_int32 (bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
right (bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `x`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
The default value is False and it shows the lower bounds.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor(the same sizes of the `x`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.
Tensor (the same sizes of the `x`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.
Examples:
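A sketch of lower versus upper bounds (expected indices computed by hand):

```python
import paddle

sorted_sequence = paddle.to_tensor([2.0, 4.0, 8.0, 16.0])
x = paddle.to_tensor([[0.0, 8.0, 4.0],
                      [16.0, -1.0, 9.0]])
print(paddle.bucketize(x, sorted_sequence))
# lower bounds: [[0, 2, 1], [3, 0, 3]]
print(paddle.bucketize(x, sorted_sequence, right=True))
# upper bounds: [[0, 3, 2], [4, 0, 3]]
```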
......@@ -1003,15 +1003,15 @@ def searchsorted(
Find the index of the corresponding `sorted_sequence` in the innermost dimension based on the given `values`.
Args:
sorted_sequence(Tensor): An input N-D or 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
values(Tensor): An input N-D tensor value with type int32, int64, float32, float64.
out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `values`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
sorted_sequence (Tensor): An input N-D or 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
values (Tensor): An input N-D tensor value with type int32, int64, float32, float64.
out_int32 (bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
right (bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `values`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
The default value is False and it shows the lower bounds.
name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
Tensor(the same sizes of the `values`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.
Tensor (the same sizes of the `values`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.
Examples:
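A sketch with an N-D `sorted_sequence`, searched row by row (expected indices computed by hand):

```python
import paddle

sorted_sequence = paddle.to_tensor([[1.0, 3.0, 5.0, 7.0, 9.0],
                                    [2.0, 4.0, 6.0, 8.0, 10.0]])
values = paddle.to_tensor([[3.0, 6.0, 9.0],
                           [3.0, 6.0, 9.0]])
print(paddle.searchsorted(sorted_sequence, values))
# lower bounds per row: [[1, 3, 4], [1, 2, 4]]
```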
......@@ -1074,12 +1074,12 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
Find values and indices of the k-th smallest at the axis.
Args:
x(Tensor): A N-D Tensor with type float16, float32, float64, int32, int64.
k(int): The k for the k-th smallest number to look for along the axis.
axis(int, optional): Axis to compute indices along. The effective range
x (Tensor): A N-D Tensor with type float16, float32, float64, int32, int64.
k (int): The k for the k-th smallest number to look for along the axis.
axis (int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is x.ndim. when axis < 0, it works the same way
as axis + R. The default is None. And if the axis is None, it will computed as -1 by default.
keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
keepdim (bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Returns:
......
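A usage sketch consistent with the `kthvalue` signature above (expected values computed by hand):

```python
import paddle

x = paddle.to_tensor([[2.0, 8.0, 5.0],
                      [9.0, 1.0, 6.0]])
values, indices = paddle.kthvalue(x, k=2)  # 2nd smallest along the last axis
print(values)   # [5., 6.]
print(indices)  # [2, 2]
```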