Unverified · Commit 4a0855a5 authored by chenxujun, committed by GitHub

Fix typos (#50852)

Parent 8a503522
@@ -46,7 +46,7 @@ def frame(x, frame_length, hop_length, axis=-1, name=None):
 The output frames tensor with shape `[..., frame_length, num_frames]` if `axis==-1`,
 otherwise `[num_frames, frame_length, ...]` where
-`num_framse = 1 + (x.shape[axis] - frame_length) // hop_length`
+`num_frames = 1 + (x.shape[axis] - frame_length) // hop_length`
 Examples:
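As an aside, the corrected formula can be checked directly against `paddle.signal.frame`. The snippet below is a minimal sketch, assuming a recent Paddle release where `paddle.signal.frame` is available; the sizes are arbitrary illustration values, not taken from the patch.

```python
import paddle

# Illustrative values only: an 8-sample signal, frame length 4, hop 2.
x = paddle.arange(8, dtype='float32')
frames = paddle.signal.frame(x, frame_length=4, hop_length=2, axis=-1)

# num_frames = 1 + (x.shape[axis] - frame_length) // hop_length = 1 + (8 - 4) // 2 = 3
print(frames.shape)  # [4, 3], i.e. [frame_length, num_frames] since axis == -1
```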
@@ -357,7 +357,7 @@ def stft(
 )
 pad_length = n_fft // 2
-# FIXME: Input `x` can be a complex tensor but pad does not supprt complex input.
+# FIXME: Input `x` can be a complex tensor but pad does not support complex input.
 x = paddle.nn.functional.pad(
 x.unsqueeze(-1),
 pad=[pad_length, pad_length],
@@ -428,13 +428,13 @@ def istft(
 - :math:`H`: Value of `hop_length`.
 Result of `istft` expected to be the inverse of `paddle.signal.stft`, but it is
-not guaranteed to reconstruct a exactly realizible time-domain signal from a STFT
+not guaranteed to reconstruct a exactly realizable time-domain signal from a STFT
 complex tensor which has been modified (via masking or otherwise). Therefore, `istft`
 gives the `[Griffin-Lim optimal estimate] <https://ieeexplore.ieee.org/document/1164317>`_
 (optimal in a least-squares sense) for the corresponding signal.
 Args:
-x (Tensor): The input data which is a 2-dimensional or 3-dimensional **complesx**
+x (Tensor): The input data which is a 2-dimensional or 3-dimensional **complex**
 Tensor with shape `[..., n_fft, num_frames]`.
 n_fft (int): The size of Fourier transform.
 hop_length (int, optional): Number of steps to advance between adjacent windows
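For context, the docstring above describes `paddle.signal.istft` as the least-squares inverse of `paddle.signal.stft`. A minimal round-trip sketch, assuming default settings (`center=True`, `onesided=True`) and arbitrary example sizes:

```python
import paddle

# Illustrative sizes only: a batch of 2 real signals, 512 samples each.
x = paddle.randn([2, 512])
n_fft = 128

# Complex spectrogram with shape [..., n_fft // 2 + 1, num_frames] when onesided=True.
spec = paddle.signal.stft(x, n_fft=n_fft)

# Inverse transform; `length` trims the reconstruction back to the original size.
x_rec = paddle.signal.istft(spec, n_fft=n_fft, length=x.shape[-1])

print(spec.shape, x_rec.shape)  # e.g. [2, 65, 17] and [2, 512]
```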
@@ -550,7 +550,7 @@ def istft(
 if win_length < n_fft:
 pad_left = (n_fft - win_length) // 2
 pad_right = n_fft - win_length - pad_left
-# FIXME: Input `window` can be a complex tensor but pad does not supprt complex input.
+# FIXME: Input `window` can be a complex tensor but pad does not support complex input.
 window = paddle.nn.functional.pad(
 window, pad=[pad_left, pad_right], mode='constant'
 )
......
@@ -215,7 +215,7 @@ if __name__ == "__main__":
 else:
 print(
 """Usage:
-1. Count and list all operator-raleated APIs that contains append_op but not _legacy_C_ops.xx.
+1. Count and list all operator-related APIs that contains append_op but not _legacy_C_ops.xx.
 python ./count_api_without_core_ops.py -c paddle
 2. Print api and the md5 of source code of the api.
 python ./count_api_without_core_ops.py -p paddle
......
@@ -16,7 +16,7 @@ import paddle.fluid.framework as framework
 from paddle.fluid import core
-# collect original ops: op which has both inference and grid defination
+# collect original ops: op which has both inference and grad definition
 def get_original_ops():
 all_ops, _, _ = core.op_supported_infos('CPU', core.VarDesc.VarType.FP16)
 grad_ops = []
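The corrected comment ("op which has both inference and grad definition") only hints at the selection rule. A hedged illustration of that idea, operating on a plain list of op names instead of the real `core.op_supported_infos` output; the `_grad` suffix convention used here is an assumption, not taken from the patch:

```python
# Hypothetical op list standing in for the core.op_supported_infos(...) result.
all_ops = ["relu", "relu_grad", "matmul", "matmul_grad", "shape"]

# Keep "original" ops: those whose grad counterpart is also registered.
original_ops = [op for op in all_ops
                if not op.endswith("_grad") and op + "_grad" in all_ops]

print(original_ops)  # ['relu', 'matmul']
```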
@@ -224,7 +224,7 @@ def get_constraint(op_type, op_proto):
 return constraint
-# funtion to generate paddle op dialect file
+# function to generate paddle op dialect file
 def convert_op_proto_into_mlir(op_descs):
 dst_dialect_file = "../../paddle/infrt/dialect/pd/ir/pd_ops.td"
......
@@ -125,7 +125,7 @@ def generate_inputs_info(input_info):
 input_args_ = ""
 for index in range(len(input_info)):
 [target_, layout_, precision_] = input_info[index].split(',')
-# todo: check vadility
+# todo: check validity
 target_ = target_type_converter[target_.strip()]
 layout_ = layout_type_converter[layout_.strip()]
 precision_ = precision_type_converter[precision_.strip()]
@@ -153,7 +153,7 @@ def generate_results_info(output_info):
 output_args_ = "let results = (outs "
 for index in range(len(output_info)):
 [target_, layout_, precision_] = output_info[index].split(',')
-# todo: check vadility
+# todo: check validity
 target_ = target_type_converter[target_.strip()]
 layout_ = layout_type_converter[layout_.strip()]
 precision_ = precision_type_converter[precision_.strip()]
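Both hunks above walk comma-separated `target, layout, precision` triples through converter tables before emitting the dialect file. A small self-contained sketch of that parsing step; the table contents below are hypothetical placeholders, not the real converter maps:

```python
# Hypothetical converter tables; the real maps live in the generator script.
target_type_converter = {"CPU": "CPU", "GPU": "GPU"}
layout_type_converter = {"NCHW": "NCHW", "ANY": "ANY"}
precision_type_converter = {"FP32": "FLOAT32", "FP16": "FLOAT16"}

input_info = ["CPU, NCHW, FP32", "GPU, ANY, FP16"]

for entry in input_info:
    # Mirrors the split-and-strip pattern in the diff above.
    target_, layout_, precision_ = (part.strip() for part in entry.split(','))
    print(target_type_converter[target_],
          layout_type_converter[layout_],
          precision_type_converter[precision_])
```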
......
@@ -47,7 +47,7 @@ def get_skipped_kernel_list():
 def parse_args():
-parser = argparse.ArgumentParser("gather phi kernel and infermate info")
+parser = argparse.ArgumentParser("gather phi kernel and infermeta info")
 parser.add_argument(
 "--paddle_root_path",
 type=str,
......
@@ -42,16 +42,16 @@ def get_compat_kernels_info(register):
 txt = f.readlines()
 content = ""
 registry = False
-is_macro_defination = False
+is_macro_definition = False
 for line in txt:
 if line.strip().startswith(
 "#define"
 ) and line.strip().endswith("\\"):
-is_macro_defination = True
+is_macro_definition = True
 continue
-if is_macro_defination:
+if is_macro_definition:
 if not line.strip().endswith("\\"):
-is_macro_defination = False
+is_macro_definition = False
 continue
 if register in line:
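The last hunk renames the flag used to skip multi-line `#define` blocks while scanning for kernel registrations. A standalone sketch of that skip logic, run over an inline sample instead of a real source file; the register macro name and sample body are illustrative only:

```python
sample = """\
#define REGISTER_HELPER(x) \\
    do_something(x); \\
    do_something_else(x);
PD_REGISTER_BASE_KERNEL_NAME(matmul, matmul_v2);
"""

register = "PD_REGISTER_BASE_KERNEL_NAME"
is_macro_definition = False

for line in sample.splitlines():
    stripped = line.strip()
    # A "#define" ending in a backslash starts a multi-line macro body.
    if stripped.startswith("#define") and stripped.endswith("\\"):
        is_macro_definition = True
        continue
    if is_macro_definition:
        # The macro body ends at the first line without a trailing backslash.
        if not stripped.endswith("\\"):
            is_macro_definition = False
        continue
    if register in line:
        print("found registration:", stripped)
```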
......