Unverified commit 93b2bf4b authored by Shuangchi He, committed by GitHub

Fix some typos. (#50429)

* Fix some typos.
Signed-off-by: Yulv-git <yulvchi@qq.com>

* pre-commit
Signed-off-by: Yulv-git <yulvchi@qq.com>

---------
Signed-off-by: Yulv-git <yulvchi@qq.com>
Parent: 1fd1c169
......@@ -33,7 +33,7 @@ add_subdirectory(ir)
# Next, (to be discusssed)
# (1) move all source files to same folder,
# (2) naturally, and and configure tests in only one `CMakeLists.txt`,
# (2) naturally, and configure tests in only one `CMakeLists.txt`,
# (3) cc tests support linking pre-built dynamic libraries. For example, use the dynamic
# library in the installed paddle by `pip`.
......
......@@ -156,7 +156,7 @@ class Dataset {
virtual void DestroyPreLoadReaders() = 0;
// set preload thread num
virtual void SetPreLoadThreadNum(int thread_num) = 0;
// seperate train thread and dataset thread
// separate train thread and dataset thread
virtual void DynamicAdjustChannelNum(int channel_num,
bool discard_remaining_ins = false) = 0;
virtual void DynamicAdjustReadersNum(int thread_num) = 0;
......
......@@ -219,7 +219,7 @@ class OpDesc {
return ret_val;
}
// it it really needed? or just maintain a ptr from block?
// Is it really needed? Or just maintain a ptr from the block?
proto::OpDesc desc_;
BlockDesc *block_{nullptr}; // not_own
// input arg name => input variable names
......
......@@ -3502,7 +3502,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
// we try to add these Attrs to the RuntimeAttrs, but these OpDesc will lose
// the RuntimeAttrs information in the process of converting the Graph to
// the Program, so additional record configuration will be introduced,
// which increases the The cost of development and understanding, so we
// which increases the cost of development and understanding, so we
// still use Attrs to get and the attributes set by these passes from Attrs
// for the time being. In the future, it is necessary to clarify the
// positioning of RuntimeAttrs and expand related functions.
......
......@@ -210,7 +210,7 @@ class VarDesc {
proto::VarType::TensorDesc *mutable_tensor_desc();
std::vector<proto::VarType::TensorDesc *> mutable_tensor_descs();
// it it really needed? or just mantain a ptr from block?
// Is it really needed? Or just mantain a ptr from the block?
proto::VarDesc desc_;
AttributeMap attrs_;
......
......@@ -56,7 +56,7 @@ class CAllReduceOpConverter : public OpConverter {
output_num,
1UL,
platform::errors::InvalidArgument(
"The ouput Out's size must equal to 1 in TRT c_allreduce op. "
"The output Out's size must equal to 1 in TRT c_allreduce op. "
"But received Out's size %u.",
output_num));
// Get attrs
......
......@@ -237,7 +237,8 @@ std::unordered_set<std::string> CinnLaunchContext::ExtractInternalVarNames(
};
VLOG(1) << "Input var list: " << string::join_strings(input_var_names, ", ");
VLOG(1) << "Ouput var list: " << string::join_strings(output_var_names, ", ");
VLOG(1) << "Output var list: "
<< string::join_strings(output_var_names, ", ");
std::for_each(
input_var_names.begin(), input_var_names.end(), exclude_names_fn);
std::for_each(
......
......@@ -277,7 +277,7 @@ class DlnneEngineOp : public framework::OperatorBase {
std::string rlym_file_name =
subgraph_root_path_ + "/" + engine_key_ + ".rlym";
// quantize don't support set quantized ouput model path now,
// quantize don't support set quantized output model path now,
// the quantized model file is in current dir
std::string quantized_rlym_file_name = engine_key_ + ".quantized.rlym";
......
......@@ -90,7 +90,7 @@ class AttnMatmulINT8 {
PADDLE_ENFORCE_EQ(cudaGetLastError(),
cudaSuccess,
platform::errors::Fatal(
"cuda error occured after computing bias. "
"cuda error occurred after computing bias. "
"But it does not mean this error is caused by "
"bias computing"));
}
......@@ -144,7 +144,7 @@ class AttnMatmulINT8 {
PADDLE_ENFORCE_EQ(cudaGetLastError(),
cudaSuccess,
platform::errors::Fatal(
"cuda error occured after computing bias. "
"cuda error occurred after computing bias. "
"But it does not mean this error is caused by "
"bias computing"));
}
......
......@@ -715,7 +715,7 @@ class FMHAGateRef {
softmax_out->dims(),
platform::errors::InvalidArgument(
"The shape of qk_out_grad and softmax_out is "
"expected to be the same. But recieved qk_out_grad's "
"expected to be the same. But received qk_out_grad's "
"shape = %s, softmax_out's shape = %s.",
qk_out_grad->dims(),
softmax_out->dims()));
......
......@@ -376,7 +376,7 @@ class FusedGateAttentionOpKernel : public framework::OpKernel<T> {
true,
platform::errors::InvalidArgument(
"key is expected to be nullptr or the same as "
"query, but recieved key=%p, query=%p.",
"query, but received key=%p, query=%p.",
key,
query));
......
......@@ -71,7 +71,7 @@ class KLDivLossOpMaker : public framework::OpProtoAndCheckerMaker {
While :attr:`reduction` is :attr:`none`, output loss is in
the same shape as Input(X), loss in each point is calculated
seperately and no reduction is applied.
separately and no reduction is applied.
While :attr:`reduction` is :attr:`mean`, output loss is in
shape of [1] and loss value is the mean value of all losses.
......
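The `reduction` semantics this docstring describes are also visible from the Python side. A minimal sketch, assuming the public `paddle.nn.functional.kl_div` API (not part of this diff) is available:

```python
# Illustrative only: the two reduction modes described in the docstring above.
# Assumes paddle.nn.functional.kl_div(input, label, reduction=...); input is
# expected to hold log-probabilities.
import paddle
import paddle.nn.functional as F

x = paddle.log(paddle.to_tensor([[0.2, 0.8], [0.5, 0.5]]))
label = paddle.to_tensor([[0.3, 0.7], [0.4, 0.6]])

loss_none = F.kl_div(x, label, reduction='none')  # same shape as x, loss per point
loss_mean = F.kl_div(x, label, reduction='mean')  # single value: mean of all losses
print(loss_none.shape, loss_mean.shape)
```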
......@@ -52,8 +52,8 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker {
"The forward vectors for the entire batch. Denote it as $\alpha$. "
"$\alpha$ is a memo table used to calculate the normalization "
"factor in CRF. $\alpha[k, v]$ stores the unnormalized "
"probabilites of all possible unfinished sequences of tags that end at "
"position $k$ with tag $v$. For each $k$, "
"probabilities of all possible unfinished sequences of tags that end "
"at position $k$ with tag $v$. For each $k$, "
"$\alpha[k, v]$ is a vector of length $D$ with a component for "
"each tag value $v$. This vector is called a forward vecotr and "
"will also be used in backward computations.")
......
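For readers unfamiliar with the forward vector $\alpha$ described above, a minimal NumPy sketch of the standard linear-chain CRF forward recurrence (illustrative only; this is not Paddle's internal implementation and it omits start/stop transitions):

```python
# alpha[k, v] accumulates the unnormalized score of all tag sequences that end
# at position k with tag v; summing alpha at the last position gives the
# normalization factor Z.
import numpy as np

def crf_forward(emission, transition):
    # emission: [seq_len, num_tags] unary scores, transition: [num_tags, num_tags]
    seq_len, num_tags = emission.shape
    alpha = np.zeros((seq_len, num_tags))
    alpha[0] = np.exp(emission[0])
    for k in range(1, seq_len):
        alpha[k] = np.exp(emission[k]) * (alpha[k - 1] @ np.exp(transition))
    return alpha

alpha = crf_forward(np.random.rand(5, 3), np.random.rand(3, 3))
print(alpha[-1].sum())  # unnormalized partition function Z
```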
......@@ -1099,7 +1099,7 @@ void DropoutNdInferMeta(const MetaTensor& x,
x_dims.size(),
phi::errors::InvalidArgument(
"The length of axis is expected to be less than or equal to the "
"dimension size of x. But recieved the length of axis is %d, the "
"dimension size of x. But received the length of axis is %d, the "
"dimension size of x is %d, x's shape is {%s}.",
axis.size(),
x_dims.size(),
......@@ -1111,7 +1111,7 @@ void DropoutNdInferMeta(const MetaTensor& x,
phi::errors::InvalidArgument(
"The %d-th value of axis is expected to be greater ot "
"equal to 0 and less than the dimensions of x. But "
"recieved axis is {%s}, the dimension size of x is %d.",
"received axis is {%s}, the dimension size of x is %d.",
i,
phi::make_ddim(axis),
x_dims.size()));
......
......@@ -32,7 +32,7 @@ void CreateInferMeta(const IntArray& shape, DataType dtype, MetaTensor* out) {
0,
phi::errors::InvalidArgument(
"Each value of attribute 'shape' is expected to be no less "
"than 0. But recieved: shape[%u] = %d; shape = [%s].",
"than 0. But received: shape[%u] = %d; shape = [%s].",
i,
data[i],
phi::make_ddim(data)));
......
......@@ -54,7 +54,7 @@ void GatherTreeKernel(const Context &dev_ctx,
parent,
beam_size,
phi::errors::InvalidArgument(
"The parents must be less than beam size, but recieved"
"The parents must be less than beam size, but received"
"parents %d is greater than or equal to beam size %d. ",
parent,
beam_size));
......
......@@ -862,7 +862,7 @@ void BroadcastKernelForDifferentVecSize(
kArity,
phi::errors::InvalidArgument("The number of inputs is expected to be "
"equal to the "
"arity of functor. But recieved: the "
"arity of functor. But received: the "
"number of inputs "
"is %d, the arity of functor is %d.",
ins.size(),
......
......@@ -37,7 +37,7 @@ __global__ void GatherTree(const T *ids_data,
auto parent = parents_data[idx];
for (int step = max_length - 2; step >= 0; step--) {
PADDLE_ENFORCE((parent < beam_size),
"The parents must be less than beam size, but recieved"
"The parents must be less than beam size, but received"
"parents %ld is greater than or equal to beam size %ld. ",
parent,
beam_size);
......
......@@ -35,7 +35,7 @@ A principle here is that source code lies on the development computer (host) so
### Build Environments
The lastest pre-built build environment images are:
The latest pre-built build environment images are:
| Image | Tag |
| ----- | --- |
......
......@@ -119,13 +119,13 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
in_shard_specs (list of list, optional): a list of list to describe the sharding specifications
for the inputs. Each item of `in_shard_specs` is a `shard_spec` between the correspoinding input
and `process_mesh`. If one item is None, the cooresponding input is replicated across all processes
If it is None, all inputs are replicated accross all processes. Note that the lenght of the
If it is None, all inputs are replicated across all processes. Note that the lenght of the
`in_shard_specs` should be equal to the actual number of inputs when calling this operation.
Default: None.
out_shard_specs (list of list, optional): a list of list to describe the sharding specifications
for the outputs. Each item of `out_shard_specs` is a `shard_spec` between the correspoinding output
and `process_mesh`. If one item is None, the cooresponding output is replicated across all processes
If it is None, all outputs are replicated accross all processes. Note that the lenght of the
If it is None, all outputs are replicated across all processes. Note that the lenght of the
`in_shard_specs` should be equal to the actual number of inputs when calling this operation.
Default: None. Default: None.
......
......@@ -67,7 +67,7 @@ def _scope_dist2single(dist_scope):
"column_parallel_linear": "linear",
"vocab_parallel_embedding": "embedding",
# "parallel_cross_entropy": "cross_entropy", while mp_layer has parallel_cross_entropy,
# but there is no parameters so the mapping of parallel_cross_entropy is not neccessary.
# but there is no parameters so the mapping of parallel_cross_entropy is not necessary.
}
return mapping.get(dist_scope, dist_scope)
......
......@@ -653,7 +653,7 @@ class TrtLayerAutoScanTest(AutoScanTest):
os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')
)
# Use a seperate random generator for skipping tests
# Use a separate random generator for skipping tests
self.skip_rng = np.random.default_rng(int(time.strftime("%W")))
def create_inference_config(self, use_trt=True) -> paddle_infer.Config:
......
......@@ -338,7 +338,7 @@ class TestFusedGateAttentionOp(OpTest):
# matmul(x, y, transpose_x=False, transpose_y=True). With different
# transpose_x and transpose_y, cublas will launch different kernels
# and the result cannot be exactly equal.
# Because the arguments of matmul in einsum is the the same as
# Because the arguments of matmul in einsum are the same as
# that in fused ops, check_equal is set to False and we use allclose
# to check the correctness.
check_equal = False
......
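The floating-point caveat in the comment above is easy to reproduce outside Paddle: two mathematically equivalent matmul formulations may not be bitwise identical, which is why the test compares with a tolerance rather than exact equality. A small illustrative NumPy sketch (not taken from the test itself):

```python
# Two equivalent ways to compute A @ B in float32; a different kernel or
# accumulation order can change the low-order bits.
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((64, 128)).astype(np.float32)
b = rng.standard_normal((128, 32)).astype(np.float32)

out1 = a @ b           # matmul(a, b)
out2 = (b.T @ a.T).T   # same result computed through transposed operands

print(np.array_equal(out1, out2))          # may be False: bitwise equality is not guaranteed
print(np.allclose(out1, out2, atol=1e-5))  # True: results agree within tolerance
```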
......@@ -147,7 +147,7 @@ def REGISTER_FN(op_type, *position_argnames):
Args:
op_type(str): The op name
position_argnames(list[str]): Input and ouput names of the op
position_argnames(list[str]): Input and output names of the op
Returns:
wrapper: Inner wrapper function
......
......@@ -121,7 +121,7 @@ class FusedBiasDropoutResidualLayerNorm(Layer):
super().__init__()
assert embed_dim > 0, (
"Expected embed_dim to be greater than 0, "
"but recieved {}".format(embed_dim)
"but received {}".format(embed_dim)
)
self._dtype = self._helper.get_default_dtype()
self._bias_attr = bias_attr
......
......@@ -108,7 +108,7 @@ class LRScheduler:
def __call__(self):
"""
Return lastest computed learning rate on current epoch.
Return latest computed learning rate on current epoch.
"""
return self.last_lr
......
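As context for the docstring above: calling an `LRScheduler` object returns the most recently computed learning rate for the current epoch. A minimal sketch, assuming the public `paddle.optimizer.lr.StepDecay` subclass (not part of this diff):

```python
# Calling the scheduler returns the latest learning rate; step() advances the epoch.
import paddle

sched = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=2, gamma=0.5)
print(sched())   # 0.1 at epoch 0
sched.step()
sched.step()
print(sched())   # 0.05 after two epochs (one decay step)
```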
......@@ -1816,11 +1816,11 @@ def conv3d_transpose(
W^\prime_{out} &= (W_{in} − 1) * strides[2] + dilations[2] * (W_f − 1) + 1
If `output_size` is None, :math:`D_{out} = D^\prime_{out}, :math:`H_{out} = \
H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the specified `output_size_depth` (the depth of the ouput feature layer) :math:`D_{out}`
H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the specified `output_size_depth` (the depth of the output feature layer) :math:`D_{out}`
must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`(not including :math:`D^\prime_{out} + strides[0]`),
the specified `output_size_height` (the height of the ouput feature layer) :math:`H_{out}` must between :math:`H^\prime_{out}`
the specified `output_size_height` (the height of the output feature layer) :math:`H_{out}` must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`(not including :math:`H^\prime_{out} + strides[1]`),
and the the specified `output_size_width` (the width of the ouput feature layer) :math:`W_{out}` must
and the specified `output_size_width` (the width of the output feature layer) :math:`W_{out}` must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`(not including :math:`W^\prime_{out} + strides[2]`).
Since transposed convolution can be treated as the inverse of convolution,
......
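The output-size rule quoted in this docstring can be checked with a few lines of arithmetic. A minimal sketch, assuming zero padding as in the quoted formula (`inferred_size` is a hypothetical helper, not a Paddle API):

```python
# For each spatial dimension of conv3d_transpose (zero padding), the inferred
# output size is (in - 1) * stride + dilation * (k - 1) + 1, and an explicit
# output_size must lie in [inferred, inferred + stride).
def inferred_size(in_size, stride, dilation, kernel):
    return (in_size - 1) * stride + dilation * (kernel - 1) + 1

d_out = inferred_size(in_size=8, stride=2, dilation=1, kernel=3)
print(d_out)                           # 17
print(list(range(d_out, d_out + 2)))   # valid output_size_depth values: 17 or 18
```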
......@@ -37,7 +37,7 @@ def getFNDAFile(rootPath, test):
os.system('touch %s' % fn_filename)
try:
f = open(filename)
print("oepn %s succesfully" % filename)
print("oepn %s successfully" % filename)
except FileNotFoundError:
print("%s is not found." % filename)
return
......@@ -84,7 +84,7 @@ def analysisFNDAFile(rootPath, test):
notrelated_ut_map_file
):
print(
"make %s and %s succesfully"
"make %s and %s successfully"
% (related_ut_map_file, related_ut_map_file)
)
else:
......@@ -96,7 +96,7 @@ def analysisFNDAFile(rootPath, test):
fn_filename = '%s/build/ut_map/%s/fnda.tmp' % (rootPath, test)
try:
f = open(fn_filename)
print("oepn %s succesfully" % fn_filename)
print("oepn %s successfully" % fn_filename)
except FileNotFoundError:
print("%s is not found." % fn_filename)
return
......@@ -148,7 +148,7 @@ def getBaseFnda(rootPath, test):
filename = '%s/build/ut_map/%s/coverage.info.tmp' % (rootPath, test)
try:
f = open(filename)
print("oepn %s succesfully" % filename)
print("oepn %s successfully" % filename)
except FileNotFoundError:
print("%s is not found." % filename)
symbol_fnda = {}
......@@ -228,7 +228,7 @@ def getCovinfo(rootPath, test):
)
return
else:
print("get coverage.info of %s succesfully" % ut_map_path)
print("get coverage.info of %s successfully" % ut_map_path)
os.system(
"cd %s && lcov --extract coverage.info '/paddle/paddle/phi/*' '/paddle/paddle/utils/*' '/paddle/paddle/fluid/*' '/paddle/build/*' -o coverage.info.tmp --rc lcov_branch_coverage=0 > /dev/null 2>&1"
% ut_map_path
......@@ -239,7 +239,7 @@ def getCovinfo(rootPath, test):
print("coverage.info.tmp is empty,collect coverage rate failed")
return
else:
print("get coverage.info.tmp succesfully")
print("get coverage.info.tmp successfully")
os.system('rm -rf %s/paddle' % ut_map_path)
os.system('rm -rf %s/coverage.info' % ut_map_path)
......