Unverified commit 447d79da, authored by H HongyuJia, committed by GitHub

polish code comment, test=doc (#45859)

Parent d3f52bcc
@@ -680,8 +680,7 @@ class OperatorWithKernel : public OperatorBase {
/**
* Transfer data from scope to a transferred scope. If there is no data need
- * to
- * be tranfered, it returns nullptr.
+ * to be transferred, it returns nullptr.
*
* * transfered_inplace_vars is a output vector.
*/
......
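For context, the contract this comment documents can be pictured with a minimal sketch. This is not Paddle's actual `OperatorWithKernel` data-preparation code; `Scope`, `Variable`, and the layout field below are simplified, hypothetical stand-ins used only to illustrate the "return nullptr when nothing needs transferring, and report the transferred names through an output vector" behaviour (the caller would own the returned scope):

```cpp
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-ins for Paddle's Scope/Variable; the real ones are richer.
struct Variable { std::string layout; };
struct Scope { std::unordered_map<std::string, Variable> vars; };

// Copy only the inputs that need a transform into a child scope, record their
// names in the output vector, and return nullptr when nothing was transferred.
Scope* PrepareTransferScope(const Scope& scope,
                            const std::string& expected_layout,
                            std::vector<std::string>* transferred_vars) {
  Scope* transfer_scope = nullptr;
  for (const auto& kv : scope.vars) {
    if (kv.second.layout == expected_layout) continue;  // no transfer needed
    if (transfer_scope == nullptr) transfer_scope = new Scope();
    Variable transformed = kv.second;
    transformed.layout = expected_layout;  // stand-in for the real transform
    transfer_scope->vars[kv.first] = transformed;
    transferred_vars->push_back(kv.first);
  }
  return transfer_scope;  // nullptr: the op can run on the original scope
}
```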
@@ -517,7 +517,7 @@ static void OpBaseRunImpl(const framework::OperatorBase& op,
* `transfer_scope` is created before PrepareData, the data after
* transform is stored in the temporary scope, and then discarded
* after the execution of op, but the original input is directly
- * overwritten in the previous dynamic graph implemention.
+ * overwritten in the previous dynamic graph implementation.
*/
auto prepared_op =
PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs, default_attrs);
......
@@ -139,7 +139,7 @@ class Tracer {
// intermediate var both in imperative and static mode. But the
// `UniqueNameGenerator` in C++ and `unique_name.py` in Python doesn't share
// the same auto-increment id. It will create a variable repeatedly with same
// name like `tmp_0` in some cases when transform dygraph into static layers.
// So we modify the default prefix key into `eager_tmp` to distinguish with
// static graph.
std::string GenerateUniqueName(std::string key = "dygraph_tmp") {
......
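The naming scheme this comment refers to can be sketched as follows. It is a hedged illustration, not Paddle's `UniqueNameGenerator`, but it shows why a distinct prefix key keeps dygraph temporaries (`dygraph_tmp_0`, `dygraph_tmp_1`, ...) from colliding with names minted by the static-graph `unique_name.py` counter:

```cpp
#include <cstdint>
#include <sstream>
#include <string>

// Illustrative generator: a prefix key plus a per-generator auto-increment id.
class UniqueNameGenerator {
 public:
  std::string Generate(const std::string& key = "dygraph_tmp") {
    std::ostringstream name;
    name << key << "_" << id_++;  // dygraph_tmp_0, dygraph_tmp_1, ...
    return name.str();
  }

 private:
  uint64_t id_ = 0;  // not shared with unique_name.py on the Python side
};
```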
@@ -62,12 +62,12 @@ class MatMulV2Op : public framework::OperatorWithKernel {
0,
platform::errors::InvalidArgument(
"The Input(X) dims size must be greater than 0,"
- " but reviced dims size is 0. "));
+ " but received dims size is 0. "));
PADDLE_ENFORCE_GT(ndims_y,
0,
platform::errors::InvalidArgument(
"The Input(Y) dims size must be greater than 0,"
- " but reviced dims size is 0. "));
+ " but received dims size is 0. "));
bool x_broadcasted = false, y_broadcasted = false;
if (ndims_x == 1) {
@@ -160,9 +160,7 @@ class MatMulV2Op : public framework::OperatorWithKernel {
} else {
#ifdef PADDLE_WITH_MKLDNN
// When matmul_v2 is first oneDNN op in a chain (there was some non oneDNN
- // op
- // previously)
- // then we also need to rotate shape NHWC -> NCWH
+ // op previously) then we also need to rotate shape NHWC -> NCWH
if ((expected_kernel_type.data_layout_ ==
framework::DataLayout::kMKLDNN) &&
(tensor.layout() != framework::DataLayout::kMKLDNN) &&
......
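The shape rotation mentioned in this hunk can be illustrated with a small, hypothetical helper. This is not the oneDNN code path itself, only the dim-vector bookkeeping the comment refers to, and it assumes the usual channels-first (NCHW) target layout:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Permute a 4-D NHWC dim vector into channels-first order.
std::vector<int64_t> RotateNHWCToChannelsFirst(const std::vector<int64_t>& dims) {
  assert(dims.size() == 4);                     // N, H, W, C
  return {dims[0], dims[3], dims[1], dims[2]};  // N, C, H, W
}
```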
@@ -59,7 +59,7 @@ struct DefaultDeviceContextType<AllocationType::GPU> {
*
* Note: DeviceContextPool is an experimental API and may be removed in the
* future. From 2.3, we recommend directly using the C++ API to combine new
- * perators.
+ * operators.
*/
class PADDLE_API DeviceContextPool {
public:
......
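As a rough mental model of what such a pool does, here is a hedged sketch with simplified `Place`/`DeviceContext` stand-ins; it is not the experimental `PADDLE_API` class the comment is about, just the underlying idea of a process-wide singleton mapping each place to a lazily created device context:

```cpp
#include <map>
#include <memory>

// Simplified, hypothetical stand-ins for illustration only.
enum class Place { kCPU, kGPU };

struct DeviceContext {
  explicit DeviceContext(Place p) : place(p) {}
  Place place;
};

class DeviceContextPool {
 public:
  static DeviceContextPool& Instance() {
    static DeviceContextPool pool;  // process-wide singleton
    return pool;
  }

  DeviceContext* Get(Place place) {
    auto& ctx = contexts_[place];
    if (!ctx) ctx = std::make_unique<DeviceContext>(place);  // lazy creation
    return ctx.get();
  }

 private:
  std::map<Place, std::unique_ptr<DeviceContext>> contexts_;
};
```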
@@ -61,7 +61,7 @@ enum class Backend : uint8_t {
NUM_BACKENDS,
/**
- * [ Why we need ALL in baisc kernel key member? ]
+ * [ Why we need ALL in basic kernel key member? ]
*
* For Tensor, ALL represents an illegal Backend, but for Kernel, some
* kernels may be device-independent by nature, such as reshape; and when
......
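One way to picture why a backend-agnostic entry is useful is the lookup fallback sketched below. The real KernelKey/KernelFactory machinery is more involved; `KernelFn` and the registry key here are hypothetical simplifications:

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <utility>

enum class Backend : uint8_t { ALL, CPU, GPU };

using KernelFn = void (*)();
using KernelRegistry = std::map<std::pair<std::string, Backend>, KernelFn>;

// Look up a kernel for a concrete backend, falling back to the
// backend-independent ALL registration (e.g. reshape) when present.
KernelFn FindKernel(const KernelRegistry& registry,
                    const std::string& op,
                    Backend backend) {
  auto it = registry.find({op, backend});
  if (it != registry.end()) return it->second;
  it = registry.find({op, Backend::ALL});
  return it != registry.end() ? it->second : nullptr;
}
```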
@@ -210,7 +210,7 @@ class KernelArgsDef {
class Kernel {
public:
- // for map element contruct
+ // for map element construct
Kernel() = default;
explicit Kernel(KernelFn fn, void* variadic_fn)
......
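The "for map element construct" note refers to a standard-library requirement: `std::unordered_map::operator[]` value-initializes the mapped value before returning a reference to it, so the mapped type must be default-constructible. A minimal, self-contained illustration with a stand-in `Kernel` (not the real class):

```cpp
#include <string>
#include <unordered_map>

// Simplified stand-in for the Kernel class in the hunk above.
struct Kernel {
  Kernel() = default;                    // needed by kernels[name] below
  explicit Kernel(void* fn) : fn_(fn) {}
  void* fn_ = nullptr;
};

int main() {
  std::unordered_map<std::string, Kernel> kernels;
  // operator[] default-constructs the mapped Kernel before assignment,
  // which is why Kernel must have a default constructor.
  kernels["matmul_v2"] = Kernel(nullptr);
  return 0;
}
```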