Unverified commit d89e0367 authored by co63oc, committed by GitHub

Fix typos (#53960)

Parent 5996f623
......@@ -113,14 +113,14 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
}
/*
readerDesc: protobuf description for reader initlization
readerDesc: protobuf description for reader initialization
argument: class_name, batch_size, use_slot, queue_size, buffer_size,
padding_index
reader:
1) each thread has a reader, reader will read input data and
put it into input queue
2) each reader has a Next() iterface, that can fetch an instance
2) each reader has a Next() interface, that can fetch an instance
from the input queue
*/
// todo: should be factory method for creating datafeed
......
......@@ -44,7 +44,7 @@ class EqualGreaterThanChecker {
PADDLE_ENFORCE_GE(
value,
lower_bound_,
platform::errors::OutOfRange("Check for attribute valur equal or "
platform::errors::OutOfRange("Check for attribute value equal or "
"greater than a certain value failed."));
}
......@@ -92,7 +92,7 @@ class TypedAttrVarInfoChecker {
true,
platform::errors::InvalidArgument(
"Required dtype of Attribute(%s) shall be "
"int32|int64, but recevied %s.",
"int32|int64, but received %s.",
var_desc->Name(),
dtype));
}
......
......@@ -316,7 +316,7 @@ void BlockDesc::MoveFrom(BlockDesc *block) {
// NOTE(GhostScreaming): don't use program->proto()->blocks_size(),
// previous assignment of new Variable in vars_ use std::move,
// which makes 'var_ptr' which holded by 'block' a nullptr.
// block->Program()->proto() will calls Flush() at firtst,
// block->Program()->proto() will calls Flush() at first,
// a null var_ptr will cause segmentation fault.
int block_size = static_cast<int>(program->Size());
for (int i = 0; i < block_size; ++i) {
......
......@@ -53,7 +53,7 @@ namespace ir {
static constexpr double kMB = 1048576.0;
// SetFuseParameterGroupsSize and SetFuseParameterMemorySize are used in unit
// test, because it is invalid that seting 'FLAGS_fuse_parameter_memory_size'
// test, because it is invalid that setting 'FLAGS_fuse_parameter_memory_size'
// and 'FLAGS_fuse_parameter_groups_size' in unit test.
void SetFuseParameterGroupsSize(int group_size) {
FLAGS_fuse_parameter_groups_size = group_size;
......@@ -567,7 +567,7 @@ class CoalesceGradTensorPass : public ir::Pass {
// coalesce_tensor op needs to be executed again after the execution
// of DropScope().
// we can make fused_output persistable, so the memeory is not cleared
// we can make fused_output persistable, so the memory is not cleared
// and coalesce_tensor op do nothing if the inputs are already continue.
result->Get<details::ProgramDescs>(details::kProgramDescs).emplace_back();
......
......@@ -62,7 +62,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
PADDLE_ENFORCE_NOT_NULL(
scope,
platform::errors::Fatal(
"scope must not be null when applying constant floding."));
"scope must not be null when applying constant folding."));
std::vector<std::string> blacklist{"feed", "matrix_multiply", "save"};
......@@ -75,7 +75,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
continue;
bool input_persis = true;
// map is used to record how many time a name string occures in the whole
// map is used to record how many time a name string occurs in the whole
// graph's nodes
std::unordered_map<std::string, int> map;
for (auto in_node : op_node->inputs) {
......
......@@ -54,7 +54,7 @@ void TestMain(const std::string& conv_type) {
// ------------------------------------------------------------------
// (in, filters, bias_0) conv -> conv_out
// (conv_out, scale,
// bias_1, mean, varaince) batch_norm -> (...)
// bias_1, mean, variance) batch_norm -> (...)
Layers layers;
auto* in = layers.data("in", {1, 3, 20, 20});
auto* filters = layers.data("filters", {3, 3, 2, 2}, true);
......
......@@ -44,7 +44,7 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass {
PADDLE_ENFORCE_GT(
momentum_ops.size(),
static_cast<size_t>(0),
platform::errors::InvalidArgument("Momentum ops must not be empyt."));
platform::errors::InvalidArgument("Momentum ops must not be empty."));
// Check attributions
// NOTE: If new attribution is added, the following code maybe need change.
......
......@@ -43,7 +43,7 @@ class FuseSgdOpPass : public FuseOptimizerOpPass {
PADDLE_ENFORCE_GT(
sgd_ops.size(),
static_cast<size_t>(0),
platform::errors::InvalidArgument("SGD ops must not be empyt."));
platform::errors::InvalidArgument("SGD ops must not be empty."));
// NOTE: fused_var is only exist in scope, so the graph doesn't have
// fused_var node.
......
......@@ -35,7 +35,7 @@ __device__ inline double Sqrt(double x) { return sqrt(x); }
)";
// List some bulit-in functions of __half implemented in cuda_fp16.hpp
// List some built-in functions of __half implemented in cuda_fp16.hpp
static constexpr char predefined_cuda_functions_fp16[] = R"(
#define __HALF_TO_US(var) *(reinterpret_cast<unsigned short *>(&(var)))
#define __HALF_TO_CUS(var) *(reinterpret_cast<const unsigned short *>(&(var)))
......
......@@ -134,7 +134,7 @@ void OperationMap::InsertUnaryElementwiseOperations() {
// cast:
// out = static_cast<T>(x)
// TODO(wangchaochaohu): This is not the compelete definition of
// TODO(wangchaochaohu): This is not the complete definition of
// cast Op, We need refine it later.
insert_handler("cast", "${0}", {});
......
......@@ -39,7 +39,7 @@ class AllReduceDelOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() {
AddInput("X", "(Tensor), tensor to be allreduced.");
AddOutput("Out", "(Tensor) the result of allreduced.");
AddAttr<int>("reduce_type", "(int) determin the reduce type.")
AddAttr<int>("reduce_type", "(int) determine the reduce type.")
.SetDefault(0);
AddAttr<bool>(
"sync_mode",
......
......@@ -33,7 +33,7 @@ class BarrierOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<int>("ring_id", "(int default 0) communication ring id.")
.SetDefault(0);
AddComment(R"DOC(
Barrier Operator - Barrier among all pariticapitors.)DOC");
Barrier Operator - Barrier among all participators.)DOC");
}
};
......
......@@ -124,7 +124,7 @@ class CCommInitAllOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
CCommInitAll operator
Initialize all collective communicatoin context
Initialize all collective communication context
)DOC");
AddAttr<std::vector<int>>(
"devices",
......
......@@ -79,7 +79,7 @@ class CCommInitMultiTrainerOp : public framework::OperatorBase {
class CCommInitMultiTrainerOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Raw variable contains a NCCL UniqueId instaces.");
AddInput("X", "Raw variable contains a NCCL UniqueId instances.");
AddComment(R"DOC(
CCommInitMultiTrainer operator
......
......@@ -115,17 +115,17 @@ class CCommInitOp : public framework::OperatorBase {
class CCommInitOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Raw variable contains a NCCL UniqueId instaces.");
AddInput("X", "Raw variable contains a NCCL UniqueId instances.");
AddComment(R"DOC(
CCommInit operator
Initialize collective communicatoin context within this trainer
Initialize collective communication context within this trainer
)DOC");
AddAttr<int>("nranks", "(int) The number of ranks of distributed trainers");
AddAttr<int>("rank",
"(int) The rank of the trainer in distributed training.");
AddAttr<int>("device_id",
"(int) The deivce_id on which to initialize the communicator."
"(int) The device_id on which to initialize the communicator."
"Now, you only have to set this attr manually for pipeline "
"training. Otherwise, make it as default.")
.SetDefault(-1);
......
......@@ -58,7 +58,7 @@ class CEmbeddingOp : public framework::OperatorWithKernel {
(height > 0 && width > 0 && start_idx >= 0),
true,
platform::errors::InvalidArgument(
"height:%ld width:%ld start_idx:%ld must not have negtive values",
"height:%ld width:%ld start_idx:%ld must not have negative values",
height,
width,
start_idx));
......@@ -142,7 +142,7 @@ class CEmbeddingOpGrad : public framework::OperatorWithKernel {
(height > 0 && width > 0 && start_idx >= 0),
true,
platform::errors::InvalidArgument(
"height:%ld width:%ld start_idx:%ld must not have negtive values",
"height:%ld width:%ld start_idx:%ld must not have negative values",
height,
width,
start_idx));
......
......@@ -89,7 +89,7 @@ class CGenBKCLIdOp : public framework::OperatorBase {
class CGenBKCLIdOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddOutput("Out", "Raw variable contains a BKCL UniqueId instaces.");
AddOutput("Out", "Raw variable contains a BKCL UniqueId instances.");
AddComment(R"DOC(
CGenBKCLId operator
......
......@@ -103,7 +103,7 @@ class CGenNCCLIdOp : public framework::OperatorBase {
class CGenNCCLIdOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddOutput("Out", "Raw variable contains a NCCL UniqueId instaces.");
AddOutput("Out", "Raw variable contains a NCCL UniqueId instances.");
AddComment(R"DOC(
CGenNCCLId operator
......
......@@ -165,7 +165,7 @@ class GenBKCLIdOp : public framework::OperatorBase {
class GenBKCLIdOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddOutput("BKCLID", "Raw variable contains a BKCL UniqueId instaces.");
AddOutput("BKCLID", "Raw variable contains a BKCL UniqueId instances.");
AddComment(R"DOC(
GenBKCLId operator
......@@ -186,11 +186,11 @@ For trainer 1~n: start a gRPC server to get the UniqueId, once got, stop the ser
.SetDefault(1);
AddAttr<bool>("use_hierarchical_allreduce",
"(bool default false) "
"Wheter to use hierarchical allreduce.")
"Whether to use hierarchical allreduce.")
.SetDefault(false);
AddAttr<int>("hierarchical_allreduce_inter_nranks",
"(int default 1) "
"Wheter to use hierarchical allreduce.")
"Whether to use hierarchical allreduce.")
.SetDefault(-1);
}
};
......
......@@ -222,7 +222,7 @@ class GenNCCLIdOp : public framework::OperatorBase {
class GenNCCLIdOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddOutput("NCCLID", "Raw variable contains a NCCL UniqueId instaces.");
AddOutput("NCCLID", "Raw variable contains a NCCL UniqueId instances.");
AddComment(R"DOC(
GenNCCLId operator
......@@ -243,11 +243,11 @@ For trainer 1~n: start a gRPC server to get the UniqueId, once got, stop the ser
.SetDefault(1);
AddAttr<bool>("use_hierarchical_allreduce",
"(bool default false) "
"Wheter to use hierarchical allreduce.")
"Whether to use hierarchical allreduce.")
.SetDefault(false);
AddAttr<int>("hierarchical_allreduce_inter_nranks",
"(int default 1) "
"Wheter to use hierarchical allreduce.")
"Whether to use hierarchical allreduce.")
.SetDefault(-1);
}
};
......
......@@ -77,7 +77,7 @@ class GlobalGatherOpMaker : public framework::OpProtoAndCheckerMaker {
.SetDefault(false);
AddComment(R"DOC(
Global Gather Operator
Gather data in X to n_expert * world_size exeperts according to
Gather data in X to n_expert * world_size experts according to
local_count and receive tensors from n_expert * world_size experts according
to global_count.
)DOC");
......
......@@ -81,7 +81,7 @@ class GlobalScatterOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
Global Scatter Operator
Scatter data in X which has been put together belong to one expert
to n_expert * world_size exeperts according to local_count
to n_expert * world_size experts according to local_count
and receive tensors from n_expert * world_size experts according
to global_count.
)DOC");
......
......@@ -314,7 +314,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
CHECK_SYS_CALL(SocketSend(sock, phead, sizeof(head)), "send");
ret_val = SocketRecv(sock, buffer, sizeof(head));
if (ret_val > 0 && memcmp(buffer, phead, sizeof(head)) == 0) {
// recv same message from recver, indicating that the link is correct
// recv same message from receiver, indicating that the link is correct
break; // accept client
} else {
VLOG(3) << "socket read failed with ret_val=" << ret_val;
......
......@@ -304,9 +304,9 @@ void SignalHandle(const char *data, int size) {
signal_info.replace(start_pos, useless_substr.length(), "");
*signal_msg_dunmer_ptr << " [SignalInfo: " << signal_info << "]\n";
// NOTE3: Final singal error message print.
// NOTE3: Final signal error message print.
// Here does not throw an exception,
// otherwise it will casue "terminate called recursively"
// otherwise it will cause "terminate called recursively"
std::ostringstream sout;
sout << "\n\n--------------------------------------\n";
sout << "C++ Traceback (most recent call last):";
......
......@@ -209,7 +209,7 @@ void BindCudaStream(py::module *m_ptr) {
Default: None.
Returns:
The recored event.
The record event.
Examples:
.. code-block:: python
......@@ -263,7 +263,7 @@ void BindCudaStream(py::module *m_ptr) {
auto place_tmp = platform::CUDAPlace(curr_device_id);
new (&self) phi::CUDAStream(place_tmp, priority - 2, stream_flag);
} else {
// seting priority 1(high) and 2(normal) correspond to the actual
// setting priority 1(high) and 2(normal) correspond to the actual
// cuda stream priority -1 and 0.
new (&self) phi::CUDAStream(*place, priority - 2, stream_flag);
}
......@@ -295,7 +295,7 @@ void BindCudaStream(py::module *m_ptr) {
}
auto stream_flag = phi::CUDAStream::StreamFlag::kStreamNonBlocking;
// seting priority 1(high) and 2(normal) correspond to the actual
// setting priority 1(high) and 2(normal) correspond to the actual
// cuda stream priority -1 and 0.
new (&self) phi::CUDAStream(
platform::CUDAPlace(device), priority - 2, stream_flag);
......
......@@ -313,7 +313,7 @@ void BindCustomDevicePy(py::module *m_ptr) {
Default: None.
Returns:
The recored event.
The record event.
Examples:
.. code-block:: python
......
......@@ -763,7 +763,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
}
} else if (args_num == (Py_ssize_t)1 || args_num == (Py_ssize_t)2 ||
args_num == (Py_ssize_t)3) {
// 1 to 3 position args, remainting arguments are kwargs
// 1 to 3 position args, remaining arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer.";
......@@ -798,7 +798,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
"constructor."));
}
} else if (args_num == (Py_ssize_t)4) {
// 4 position args, remainting arguments are kwargs
// 4 position args, remaining arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer.";
......@@ -808,7 +808,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible constructor arguments, "
"there are 4 position args and remainting arguments arg kwargs,"
"there are 4 position args and remaining arguments arg kwargs,"
"but the first position args should be PyArray. "
"Please check your code and make sure the first position args is "
"PyArray."));
......@@ -856,7 +856,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
"Please check your code and make sure you call the existed "
"constructor."));
}
} else { // five position args, remainting arguments are kwargs
} else { // five position args, remaining arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer";
......@@ -866,7 +866,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible constructor arguments, "
"there are 5 position args and remainting arguments are kwargs,"
"there are 5 position args and remaining arguments are kwargs,"
"but the first position args should be PyArray. "
"Please check your code and make sure the first position args is "
"PyArray."));
......@@ -879,11 +879,11 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
AutoInitTensorByPyArray(
py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
return 0;
} else { // six position args, remainting arguments are kwargs, but this
} else { // six position args, remaining arguments are kwargs, but this
// is not a right way
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible constructor arguments, "
"there are 6 position args and the remainting arguments are kwargs. "
"there are 6 position args and the remaining arguments are kwargs. "
"Please check your code and make sure the first position args is "
"PyArray."));
}
......@@ -1050,7 +1050,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
}
}
} else if (args_num == (Py_ssize_t)1) { // case 3 ~ 6
// 1 position args, remainting arguments are kwargs
// 1 position args, remaining arguments are kwargs
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's string initializer.";
......
......@@ -131,7 +131,7 @@ static PyObject * %s(PyObject *self, PyObject *args, PyObject *kwargs)
const char* PYBIND_ITEM_TEMPLATE = R"( {"%s", (PyCFunction)(void(*)(void))%s, METH_VARARGS | METH_KEYWORDS, "C++ interface function for %s in dygraph."},)";
// These operators will skip automatical code generatrion and
// These operators will skip automatical code generation and
// need to be handwritten in CUSTOM_HANDWRITE_OP_FUNC_FILE
std::unordered_set<std::string> CUSTOM_HANDWRITE_OPS_SET = {"run_program"};
......@@ -216,7 +216,7 @@ std::string GenerateOpFunctionsBody(
for (auto& output : op_proto->outputs()) {
auto& out_name = output.name();
// skip those dispensable oututs
// skip those dispensable outputs
if (output.dispensable() && !FindOutsMap(op_type, out_name)) {
continue;
}
......@@ -341,7 +341,7 @@ std::string GenerateOpFunctionsBody(
function_args = paddle::string::Sprintf(FUNCTION_ARGS, input_args);
}
// generate op funtcion body
// generate op function body
auto op_function_str = paddle::string::Sprintf(OP_FUNCTION_TEMPLATE,
func_name,
ins_cast_str,
......@@ -410,7 +410,7 @@ GenerateOpFunctions() {
continue;
}
auto& op_type = op_proto->type();
// Skip operators that will be handwriten in CUSTOM_HANDWRITE_OP_FUNC_FILE.
// Skip operators that will be handwritten in CUSTOM_HANDWRITE_OP_FUNC_FILE.
if (CUSTOM_HANDWRITE_OPS_SET.count(op_type)) {
continue;
}
......
......@@ -295,7 +295,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
VLOG(6) << "Getting DenseTensor's numpy value";
auto dense_tensor =
std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
// TODO(qili93): temporary for ascned npu performance to be removed along
// TODO(qili93): temporary for ascend npu performance to be removed along
// with npu_identity op
paddle::Tensor temp_tensor(std::make_shared<phi::DenseTensor>());
if (dense_tensor->storage_properties_initialized()) {
......@@ -422,7 +422,7 @@ static void IncreaseTensorReferenceCountUntilCopyComplete(
// Note(dev): This is an empty callback, the only way is to "reference"
// inner memory Holder, so it will not be destructed until the kernels
// launched at current stream of given place is finished, such as
// CUDAPinned Mem -> CUDA by cudamemcpyAsync.
// CUDAPinned Mem -> CUDA by cudaMemcpyAsync.
auto callback = [tensor, place_]() {
VLOG(3) << "Run callback of Tensor:" << tensor.name() << " at place "
<< place_;
......
......@@ -41,8 +41,8 @@ extern PyTypeObject* p_tensor_type;
PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_TRY
// NOTE(dev): [why not use egr::Controller::Instance::GernerateUniqueName()?]
// Beacause Controller must holder a tracer, but 'tensor.name' maybe called
// NOTE(dev): [why not use egr::Controller::Instance::GenerateUniqueName()?]
// Because Controller must holder a tracer, but 'tensor.name' maybe called
// everywhere such as static graph mode in @to_static, which means tracer is
// None.
static egr::UniqueNameGenerator name_generator;
......
......@@ -137,7 +137,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
PyObject_GetAttrString(cls, "_backward_function");
if (!backward_function) {
PADDLE_THROW(paddle::platform::errors::InvalidArgument(
"Get _backward_function faild."));
"Get _backward_function failed."));
}
PyLayerObject* ctx = reinterpret_cast<PyLayerObject*>(
PyObject_CallFunctionObjArgs(backward_function, nullptr));
......@@ -255,7 +255,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
auto forward_fn = PyObject_GetAttrString(cls, "forward");
if (!forward_fn) {
PADDLE_THROW(paddle::platform::errors::InvalidArgument(
"Get forward function faild."));
"Get forward function failed."));
}
bool trace_backward = egr::Controller::Instance().HasGrad();
egr::Controller::Instance().SetHasGrad(false);
......
......@@ -1362,7 +1362,7 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
return paddle::experimental::Scalar(value);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) is numpy.ndarry, the inner elements "
"%s(): argument (position %d) is numpy.ndarray, the inner elements "
"must be "
"numpy.float32/float64 now, but got %s",
op_type,
......
......@@ -481,7 +481,7 @@ static void VarBaseCopy(std::shared_ptr<imperative::VarBase> &src, // NOLINT
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The destion Tensor(%s) can not copy when it is not empty.",
"The destination Tensor(%s) can not copy when it is not empty.",
dst.Name()));
}
}
......@@ -538,7 +538,7 @@ void BindImperative(py::module *m_ptr) {
"lists with different lengths.\n * Check the reader "
"function passed to 'set_(sample/sample_list/batch)"
"_generator' to locate the data causes this issue."));
// 2. construcct LoDTensor
// 2. construct LoDTensor
phi::DenseTensor t;
SetTensorFromPyArray<platform::CPUPlace>(
&t, array, platform::CPUPlace(), true);
......@@ -578,7 +578,7 @@ void BindImperative(py::module *m_ptr) {
"lists with different lengths.\n * Check the reader "
"function passed to 'set_(sample/sample_list/batch)"
"_generator' to locate the data causes this issue."));
// 2. construcct LoDTensor
// 2. construct LoDTensor
phi::DenseTensor t;
SetTensorFromPyArray<platform::CPUPlace>(
&t, array, platform::CPUPlace(), true);
......@@ -787,7 +787,7 @@ void BindImperative(py::module *m_ptr) {
// inplace operator for the VarBase self.
self->BumpInplaceVersion();
// 1. Check argumnets
// 1. Check arguments
bool parse_index = true;
// Check whether _index can be parsed.
......@@ -1512,7 +1512,7 @@ void BindImperative(py::module *m_ptr) {
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Imperative SelectedRows allreduce is not supported when "
"paddle is compiled with NCCL verison lower than v2.2.12. "
"paddle is compiled with NCCL version lower than v2.2.12. "
"You can set is_sparse=False for the Layer containing "
"this argument, such as Embedding(is_sparse=False)."));
}
......@@ -1586,7 +1586,7 @@ void BindImperative(py::module *m_ptr) {
This hook will be called every time the gradient of current Tensor has been fully calculated.
There are two differences with `_register_grad_hook`:
1. This backward hook will be executed after the gradient accumulation completed across batchs,
1. This backward hook will be executed after the gradient accumulation completed across batches,
but the hook registered by `_register_grad_hook` will be executed the gradient accumulation
completed in current batch.
2. This backward hook function should have the following signature:
......
......@@ -193,7 +193,7 @@ static PyObject *custom_eval_frame_shim(PyFrameObject *frame, int throw_flag) {
static PyObject *set_eval_frame(PyObject *new_callback, PyThreadState *tstate) {
// Change the eval frame callback and return the old one
// - None: disables: diable custom callback.
// - None: disables: disable custom callback.
// - Python callable(): enables custom callback.
// NOTE: Cache is not supported now
PyObject *old_callback = eval_frame_callback_get();
......
......@@ -301,7 +301,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.reduce_ = strategy;
},
......@@ -332,7 +332,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.gradient_scale_ = strategy;
},
......@@ -384,7 +384,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.debug_graphviz_path_ = path;
},
......@@ -412,7 +412,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.enable_sequential_execution_ = b;
},
......@@ -439,7 +439,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.remove_unnecessary_lock_ = b;
},
......@@ -515,7 +515,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, "
"BuildStrategy has been finalized, "
"cannot be configured again."));
self.build_cinn_pass_ = b;
},
......@@ -544,7 +544,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fuse_elewise_add_act_ops_ = b;
},
......@@ -570,7 +570,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fuse_gemm_epilogue_ = b;
},
......@@ -596,7 +596,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fuse_adamw_ = b;
},
......@@ -618,7 +618,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fused_attention_ = b;
},
......@@ -644,7 +644,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fused_feedforward_ = b;
},
......@@ -670,7 +670,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.sequential_run_ = b;
},
......@@ -695,7 +695,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fuse_bn_act_ops_ = b;
},
......@@ -721,7 +721,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fuse_bn_add_act_ops_ = b;
},
......@@ -747,7 +747,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.enable_auto_fusion_ = b;
},
......@@ -776,7 +776,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.fuse_relu_depthwise_conv_ = b;
},
......@@ -807,7 +807,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, "
"BuildStrategy has been finalized, "
"cannot be configured again."));
self.fuse_broadcast_ops_ = b;
},
......@@ -839,7 +839,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, "
"BuildStrategy has been finalized, "
"cannot be configured again."));
self.fuse_all_optimizer_ops_ = b;
})
......@@ -850,7 +850,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT
PADDLE_ENFORCE_NE(self.IsFinalized(),
true,
platform::errors::PreconditionNotMet(
"BuildStrategy has been finlaized, cannot be "
"BuildStrategy has been finalized, cannot be "
"configured again."));
self.sync_batch_norm_ = b;
},
......
......@@ -434,8 +434,8 @@ void BindOpDesc(pybind11::module *m) {
.def("set_serialized_attr",
[](pd::OpDesc &self,
const std::string &name,
const pybind11::bytes &seriralized) {
std::string ser(seriralized);
const pybind11::bytes &serialized) {
std::string ser(serialized);
self.SetAttr(name, ser);
})
.def("_block_attr_id", &pd::OpDesc::GetBlockAttrId)
......@@ -484,8 +484,8 @@ void BindOpDesc(pybind11::module *m) {
return self.to<bool>();
case phi::DataType::COMPLEX64:
case phi::DataType::COMPLEX128:
// to paddle's complex to avoid ambiguious
// when converting bfloat16 or float16 to std::copmplex<double>
// to paddle's complex to avoid ambiguous
// when converting bfloat16 or float16 to std::complex<double>
return static_cast<std::complex<double>>(
self.to<phi::dtype::complex<double>>());
default:
......
......@@ -320,7 +320,7 @@ class MultiDeviceFeedReader {
PADDLE_ENFORCE_EQ(status,
Status::kSuccess,
platform::errors::NotFound(
"The function executed sucessfully, but "
"The function executed successfully, but "
"the result status is not Status::kSuccess"));
}
......
......@@ -46,7 +46,7 @@ namespace ir {
};
///
/// \brief Define Parameteric AttributeStorage for StrAttribute.
/// \brief Define Parametric AttributeStorage for StrAttribute.
///
struct StrAttributeStorage : public AttributeStorage {
using ParamKey = std::string;
......
......@@ -66,7 +66,7 @@ class IrContextImpl {
std::lock_guard<ir::SpinLock> guard(registed_abstract_types_lock_);
auto iter = registed_abstract_types_.find(type_id);
if (iter != registed_abstract_types_.end()) {
VLOG(4) << "Fonund a cached abstract_type of: [TypeId_hash="
VLOG(4) << "Found a cached abstract_type of: [TypeId_hash="
<< std::hash<ir::TypeId>()(type_id)
<< ", AbstractType_ptr=" << iter->second << "].";
return iter->second;
......@@ -89,7 +89,7 @@ class IrContextImpl {
std::lock_guard<ir::SpinLock> guard(registed_abstract_attributes_lock_);
auto iter = registed_abstract_attributes_.find(type_id);
if (iter != registed_abstract_attributes_.end()) {
VLOG(4) << "Fonund a cached abstract_attribute of: [TypeId_hash="
VLOG(4) << "Found a cached abstract_attribute of: [TypeId_hash="
<< std::hash<ir::TypeId>()(type_id)
<< ", AbstractAttribute_ptr=" << iter->second << "].";
return iter->second;
......@@ -110,7 +110,7 @@ class IrContextImpl {
std::lock_guard<ir::SpinLock> guard(registed_op_infos_lock_);
auto iter = registed_op_infos_.find(name);
if (iter != registed_op_infos_.end()) {
VLOG(4) << "Fonund a cached operation of: [name=" << name
VLOG(4) << "Found a cached operation of: [name=" << name
<< ", OpInfoImpl ptr=" << iter->second << "].";
return iter->second;
}
......@@ -129,11 +129,11 @@ class IrContextImpl {
std::lock_guard<ir::SpinLock> guard(registed_dialect_lock_);
auto iter = registed_dialect_.find(name);
if (iter != registed_dialect_.end()) {
VLOG(4) << "Fonund a cached dialect of: [name=" << name
VLOG(4) << "Found a cached dialect of: [name=" << name
<< ", dialect_ptr=" << iter->second << "].";
return iter->second;
}
LOG(WARNING) << "No cache fonund dialect of: [name=" << name << "].";
LOG(WARNING) << "No cache found dialect of: [name=" << name << "].";
return nullptr;
}
......@@ -156,7 +156,7 @@ class IrContextImpl {
// AttributeStorage uniquer and cache instances.
StorageManager registed_attribute_storage_manager_;
// The dialcet registered in the context.
// The dialect registered in the context.
std::unordered_map<std::string, Dialect *> registed_dialect_;
ir::SpinLock registed_dialect_lock_;
......
......@@ -118,7 +118,7 @@ void Operation::destroy() {
}
reinterpret_cast<Operation *>(base_ptr)->~Operation();
base_ptr += sizeof(Operation);
// 2.3. Deconstruct OpOpOerand.
// 2.3. Deconstruct OpOperand.
for (size_t idx = 0; idx < num_operands_; idx++) {
reinterpret_cast<detail::OpOperandImpl *>(base_ptr)->~OpOperandImpl();
base_ptr += sizeof(detail::OpOperandImpl);
......
......@@ -39,7 +39,7 @@ class alignas(8) Operation final {
static Operation *create(const OperationArgument &op_argument);
///
/// \brief Destroy the operation objects and free memeory by create().
/// \brief Destroy the operation objects and free memory by create().
///
void destroy();
......
......@@ -72,7 +72,7 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl(
std::function<bool(const StorageBase *)> equal_func,
std::function<StorageBase *()> constructor) {
std::lock_guard<ir::SpinLock> guard(parametric_instance_lock_);
VLOG(4) << "Try to get a parameteretric storage of: [TypeId_hash="
VLOG(4) << "Try to get a parametric storage of: [TypeId_hash="
<< std::hash<ir::TypeId>()(type_id) << ", param_hash=" << hash_value
<< "].";
if (parametric_instance_.find(type_id) == parametric_instance_.end())
......
......@@ -77,7 +77,7 @@ class AbstractType {
private:
///
/// \brief The constructor is set to private and provides the user with the
/// get method to obtain and manage the AstractType.
/// get method to obtain and manage the AbstractType.
///
/// \param type_id The type id of the AbstractType.
/// \param dialect The Dialect which the type registered to.
......
......@@ -55,7 +55,7 @@ class OpOperandImpl {
};
///
/// \brief ValueImpl is the base class of all drived Value classes such as
/// \brief ValueImpl is the base class of all derived Value classes such as
/// OpResultImpl. This class defines all the information and usage interface in
/// the IR Value. Each Value include three attributes:
/// (1) type: ir::Type; (2) UD-chain of value: OpOperandImpl*, first operand
......
......@@ -180,7 +180,7 @@ def save(
filepath: saved path
src: the audio tensor
sample_rate: the number of samples of audio per second.
channels_first: src channel infomation
channels_first: src channel information
if True, means input tensor is (channels, time)
if False, means input tensor is (time, channels)
encoding: audio encoding format, wave_backend only support PCM16 now.
......@@ -216,7 +216,7 @@ def save(
# only support PCM16
if bits_per_sample not in (None, 16):
raise ValueError("Invalid bits_per_sample, only supprt 16 bit")
raise ValueError("Invalid bits_per_sample, only support 16 bit")
sample_width = int(bits_per_sample / 8) # 2
......
......@@ -44,7 +44,7 @@ class AudioClassificationDataset(paddle.io.Dataset):
files (:obj:`List[str]`): A list of absolute path of audio files.
labels (:obj:`List[int]`): Labels of audio files.
feat_type (:obj:`str`, `optional`, defaults to `raw`):
It identifies the feature type that user wants to extrace of an audio file.
It identifies the feature type that user wants to extract an audio file.
"""
super().__init__()
......
......@@ -187,7 +187,7 @@ class Hessian(Jacobian):
class _Jacobian:
"""The base class for computing Jacobian matrix.
``_Jacobian`` implementes the core logic of multidimensional index and lazy
``_Jacobian`` implements the core logic of multidimensional index and lazy
evaluation for Jacobian matrix, subclass only need to overwrite following
methods:
......@@ -436,7 +436,7 @@ def _multi_index(indexes, shape):
index.start + shape[i] if index.start < 0 else index.start,
index.stop + shape[i] if index.stop < 0 else index.stop,
# Negative step means index backward, no need to convert to
# positive interger.
# positive integer.
index.step,
)
)
......@@ -690,9 +690,9 @@ def _grad_for_jacobian(ys, xs, v=None):
inputs.
"""
if paddle.in_dynamic_mode():
# paddle.grad returns a list though the inputs is a signle Tensor. The
# paddle.grad returns a list though the inputs is a single Tensor. The
# follow code snippet fixes the problem by return the first element of
# xs_grad when the xs is a signle Tensor.
# xs_grad when the xs is a single Tensor.
xs_grad = paddle.grad(ys, xs, v, create_graph=True, allow_unused=True)
if (
isinstance(xs, paddle.fluid.framework.Variable)
......
......@@ -67,7 +67,7 @@ def batch(reader, batch_size, drop_last=False):
batch_size = int(batch_size)
if batch_size <= 0:
raise ValueError(
"batch_size should be a positive integeral value, "
"batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size)
)
......
......@@ -82,7 +82,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0):
and src_op.attr("seed")
):
_logger.info(
"Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format(
"Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format(
src_op
)
)
......@@ -90,7 +90,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0):
pass
# NOTE Adopt for recompute
# If user already set seed, We should not modify it. But if the seed is added by recompute pass, it should be under control.
# TODO in future recompute pass should happen after parallel partitione. and remove this at that time.
# TODO in future recompute pass should happen after parallel partition. and remove this at that time.
elif len(kwargs['Seed']) > 0 or len(src_op.input("Seed")) > 0:
seed_var_name = kwargs['Seed'][0]
if seed_var_name.startswith('rc_seed'):
......@@ -115,7 +115,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0):
pre_op._set_attr("force_cpu", True)
else:
_logger.info(
"Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format(
"Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format(
src_op
)
)
......
......@@ -87,7 +87,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0):
and src_op.attr("seed")
):
_logger.info(
"Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format(
"Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format(
src_op
)
)
......@@ -120,7 +120,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0):
pre_op._set_attr("force_cpu", True)
else:
_logger.info(
"Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format(
"Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format(
src_op
)
)
......