Unverified commit cd0f1523 authored by Ruibin Cheung, committed by GitHub

[clang-tidy] enable modernize-use-override (#55491)

Parent 2d98758c
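The change flips `modernize-use-override` from disabled to enabled in the `.clang-tidy` config and fixes the resulting warnings by adding `override` (and dropping redundant `virtual`) on overriding member functions and destructors. Below is a minimal sketch of the pattern the check enforces; the `Base`/`Derived*` classes are illustrative only and do not appear in this patch.

```cpp
// Illustrative example (not from this patch): modernize-use-override flags
// virtual member functions that override a base-class function but lack the
// `override` specifier, and rewrites them as shown below.

class Base {
 public:
  virtual ~Base() = default;
  virtual void Run() const;
};

// Before the check: redundant `virtual`, no `override`. This compiles, but a
// signature typo would silently declare a new function instead of overriding.
class DerivedBefore : public Base {
 public:
  virtual void Run() const;
};

// After the check: `override` states the intent explicitly and makes the
// compiler reject any mismatched signature.
class DerivedAfter : public Base {
 public:
  void Run() const override;
};
```

The hunks that follow apply exactly this rewrite across passes, operators, allocators, and the profiler: each pair of lines shows the old declaration followed by the new one carrying `override`.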
......@@ -185,7 +185,7 @@ Checks: '
-modernize-use-equals-delete,
-modernize-use-noexcept,
-modernize-use-nullptr,
-modernize-use-override,
modernize-use-override,
-modernize-use-transparent-functors,
-modernize-use-uncaught-exceptions,
-performance-faster-string-find,
......
......@@ -69,7 +69,7 @@ double GetFuseParameterMemorySize() { return FLAGS_fuse_parameter_memory_size; }
class CoalesceGradTensorPass : public ir::Pass {
protected:
void ApplyImpl(ir::Graph *graph) const {
void ApplyImpl(ir::Graph *graph) const override {
if (Get<size_t>(details::kNRanks) <= 1) {
VLOG(6) << "The number of place is" << Get<size_t>(details::kNRanks)
<< ", there doesn't need apply FuseAllReduceOpPass.";
......
......@@ -30,9 +30,9 @@ class Node;
class FuseAdamOpPass : public FuseOptimizerOpPass {
private:
const std::string GetOpType() const { return "adam"; }
const std::string GetOpType() const override { return "adam"; }
const std::vector<std::string> GetAuxiliaryVarNames() const {
const std::vector<std::string> GetAuxiliaryVarNames() const override {
return {"Moment1", "Moment2", "Beta1Pow", "Beta2Pow"};
}
......@@ -41,7 +41,7 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
&aux_var_set,
const std::unordered_map<std::string, std::string> &fused_vars_name,
const std::vector<ir::Node *> &adam_ops,
ir::Graph *graph) const {
ir::Graph *graph) const override {
auto fused_adam_node =
FuseAdamOps(aux_var_set, fused_vars_name, adam_ops, graph);
return fused_adam_node;
......
......@@ -29,18 +29,18 @@ class Node;
class FuseMomentumOpPass : public FuseOptimizerOpPass {
private:
virtual const std::string GetOpType() const { return "momentum"; }
const std::string GetOpType() const override { return "momentum"; }
virtual const std::vector<std::string> GetAuxiliaryVarNames() const {
const std::vector<std::string> GetAuxiliaryVarNames() const override {
return {"Velocity"};
}
// Fuse Momentum Ops
virtual ir::Node *FuseOptimizerOps(
ir::Node *FuseOptimizerOps(
const std::unordered_map<std::string, std::vector<std::string>> &vars_set,
const std::unordered_map<std::string, std::string> &fused_vars_name,
const std::vector<ir::Node *> &momentum_ops,
ir::Graph *graph) const {
ir::Graph *graph) const override {
PADDLE_ENFORCE_GT(
momentum_ops.size(),
static_cast<size_t>(0),
......
......@@ -28,18 +28,18 @@ class Node;
class FuseSgdOpPass : public FuseOptimizerOpPass {
private:
virtual const std::string GetOpType() const { return "sgd"; }
const std::string GetOpType() const override { return "sgd"; }
virtual const std::vector<std::string> GetAuxiliaryVarNames() const {
const std::vector<std::string> GetAuxiliaryVarNames() const override {
return {};
}
// Fuse Sgd Ops
virtual ir::Node *FuseOptimizerOps(
ir::Node *FuseOptimizerOps(
const std::unordered_map<std::string, std::vector<std::string>> &vars_set,
const std::unordered_map<std::string, std::string> &fused_vars_name,
const std::vector<ir::Node *> &sgd_ops,
ir::Graph *graph) const {
ir::Graph *graph) const override {
PADDLE_ENFORCE_GT(
sgd_ops.size(),
static_cast<size_t>(0),
......
......@@ -53,7 +53,7 @@ class WorkQueueImpl : public WorkQueue {
options_.always_spinning);
}
virtual ~WorkQueueImpl() {
~WorkQueueImpl() override {
delete queue_;
if (tracker_ != nullptr) {
tracker_->~TaskTracker();
......@@ -94,7 +94,7 @@ class WorkQueueGroupImpl : public WorkQueueGroup {
explicit WorkQueueGroupImpl(
const std::vector<WorkQueueOptions>& queue_options);
~WorkQueueGroupImpl();
~WorkQueueGroupImpl() override;
void AddTask(size_t queue_idx, std::function<void()> fn) override;
......
......@@ -40,7 +40,7 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
platform::errors::InvalidArgument("Op proto cannot be nullptr."));
}
~KernelArgsNameMakerByOpProto() {}
~KernelArgsNameMakerByOpProto() override {}
const paddle::small_vector<const char*>& GetInputArgsNames() override;
const paddle::small_vector<const char*>& GetOutputArgsNames() override;
......
......@@ -127,7 +127,7 @@ class CUDAGraphAllocator
: underlying_allocator_(allocator) {}
public:
~CUDAGraphAllocator() {}
~CUDAGraphAllocator() override {}
static std::shared_ptr<Allocator> Create(
const std::shared_ptr<Allocator>& allocator) {
......@@ -135,14 +135,14 @@ class CUDAGraphAllocator
}
protected:
phi::Allocation* AllocateImpl(size_t size) {
phi::Allocation* AllocateImpl(size_t size) override {
VLOG(10) << "Allocate " << size << " for CUDA Graph";
return new PrivateAllocation(this,
static_unique_ptr_cast<Allocation>(
underlying_allocator_->Allocate(size)));
}
void FreeImpl(phi::Allocation* allocation) {
void FreeImpl(phi::Allocation* allocation) override {
VLOG(10) << "delete for CUDA Graph";
delete allocation;
}
......
......@@ -36,7 +36,7 @@ class AllReduceDelOp : public framework::OperatorWithKernel {
class AllReduceDelOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor), tensor to be allreduced.");
AddOutput("Out", "(Tensor) the result of allreduced.");
AddAttr<int>("reduce_type", "(int) determine the reduce type.")
......
......@@ -45,7 +45,7 @@ class AllToAllBaseOp : public framework::OperatorWithKernel {
class AllToAllBaseOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor send.");
AddOutput("Out", "(Tensor) the result of alltoall.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
......
......@@ -27,7 +27,7 @@ class BarrierOp : public framework::OperatorWithKernel {
class BarrierOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Input data (only used in CUDAKernel).");
AddOutput("Out", "(Tensor) Output data (only used in CUDAKernel).");
AddAttr<int>("ring_id", "(int default 0) communication ring id.")
......
......@@ -44,7 +44,7 @@ class CAllGatherOp : public framework::OperatorWithKernel {
class CAllGatherOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be allgather");
AddOutput("Out", "(Tensor) the allgather result");
AddAttr<int>("ring_id", "(int default 0) communication ring id.")
......
......@@ -35,7 +35,7 @@ class CBroadcastOp : public framework::OperatorWithKernel {
class CBroadcastOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be broadcasted.");
AddOutput("Out", "(Tensor) the result of broadcast.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
......
......@@ -38,7 +38,7 @@ namespace operators {
class CCommInitAllInferShape : public framework::InferShapeBase {
public:
~CCommInitAllInferShape() {}
~CCommInitAllInferShape() override {}
void operator()(framework::InferShapeContext* ctx) const override{};
};
......
......@@ -38,7 +38,7 @@ namespace operators {
class CCommInitMultiTrainerInferShape : public framework::InferShapeBase {
public:
~CCommInitMultiTrainerInferShape() {}
~CCommInitMultiTrainerInferShape() override {}
void operator()(framework::InferShapeContext* ctx) const override{};
};
......
......@@ -81,7 +81,7 @@ class CConcatOpGradMaker : public framework::SingleGradOpMaker<T> {
class CConcatOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be concated.");
AddOutput("Out", "(Tensor) the result of concat.");
AddAttr<int>("rank", "(int default 0) rank id.").SetDefault(0);
......
......@@ -44,7 +44,7 @@ class CIdentityOp : public framework::OperatorWithKernel {
class CIdentityOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) identity tensor.");
AddOutput("Out", "(Tensor) identity tensor.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
......
......@@ -42,7 +42,7 @@ class CReduceScatterOp : public framework::OperatorWithKernel {
class CReduceScatterOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be allgather");
AddOutput("Out", "(Tensor) the allgather result");
AddAttr<int>("ring_id", "(int default 0) communication ring id.")
......
......@@ -61,7 +61,7 @@ class CScatterOp : public framework::OperatorWithKernel {
class CScatterOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be broadcasted.");
AddOutput("Out", "(Tensor) the result of broadcast.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
......
......@@ -83,7 +83,7 @@ class CSoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
class CSoftmaxWithCrossEntropyOpMaker
: public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("Logits",
"(Tensor, default: Tensor<float>), The input tensor of unscaled "
"log probabilities, whose dimension :attr:`axis` should be scaled "
......
......@@ -89,7 +89,7 @@ class CSplitOpGradMaker : public framework::SingleGradOpMaker<T> {
class CSplitOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be split.");
AddOutput("Out", "(Tensor) the result of split.");
AddAttr<int>("rank", "(int default 0) rank id.").SetDefault(0);
......
......@@ -18,7 +18,7 @@ namespace operators {
class CSyncCalcStreamOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Dependency of the variable need to sync");
AddOutput("Out", "(Tensor) Dependency of the variable need to sync");
AddComment(R"DOC(
......
......@@ -31,7 +31,7 @@ class CSyncCommStreamOp : public framework::OperatorWithKernel {
class CSyncCommStreamOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Dependency of the variable need to sync")
.AsDuplicable();
AddOutput("Out", "(Tensor) Dependency of the variable need to sync")
......
......@@ -73,7 +73,7 @@ class CWaitCommOp : public framework::OperatorBase {
class CWaitCommOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Dependency of the variable need to sync")
.AsDuplicable();
AddOutput("Out", "(Tensor) Dependency of the variable need to sync")
......
......@@ -74,7 +74,7 @@ class CWaitComputeOp : public framework::OperatorBase {
class CWaitComputeOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Dependency of the variable need to sync")
.AsDuplicable();
AddOutput("Out", "(Tensor) Dependency of the variable need to sync")
......
......@@ -58,7 +58,7 @@ class GlobalGatherOp : public framework::OperatorWithKernel {
class GlobalGatherOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor send.");
AddInput("local_count",
"(Tensor) Tensor which has n_expert * world_size elements that "
......
......@@ -61,7 +61,7 @@ class GlobalScatterOp : public framework::OperatorWithKernel {
class GlobalScatterOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor send.");
AddInput("local_count",
"(Tensor) Tensor which has n_expert * world_size elements that "
......
......@@ -37,7 +37,7 @@ class MpAllReduceSumOp : public framework::OperatorWithKernel {
class MpAllReduceSumOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor), tensor to be allreduced in model parallel.");
AddOutput("Out", "(Tensor) the allreduced result in model parallel.");
AddAttr<int>("ring_id", "(int default 0) communication ring id.")
......
......@@ -45,7 +45,7 @@ class PartialAllGatherOp : public framework::OperatorWithKernel {
class PartialAllGatherOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be partial allgather");
AddOutput("Out", "(Tensor) the allgather result");
AddAttr<int>("ring_id", "(int default 0) communication ring id.")
......
......@@ -91,7 +91,7 @@ class PartialRecvOp : public framework::OperatorWithKernel {
class PartialRecvOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddOutput("Out", "(Tensor) tensor to receive.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
.SetDefault(0);
......
......@@ -60,7 +60,7 @@ class PartialSendOp : public framework::OperatorWithKernel {
class PartialSendMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be sent.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
.SetDefault(0);
......
......@@ -80,7 +80,7 @@ class RecvOpV2 : public framework::OperatorWithKernel {
class RecvOpV2Maker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddOutput("Out", "(Tensor) tensor to receive.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
.SetDefault(0);
......
......@@ -56,7 +56,7 @@ class SendOpV2 : public framework::OperatorWithKernel {
class SendOpV2Maker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) tensor to be sent.");
AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
.SetDefault(0);
......
......@@ -127,7 +127,7 @@ class FusedGemmEpilogueOp : public framework::OperatorWithKernel {
}
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const {
const framework::ExecutionContext& ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(data_type, ctx.GetPlace());
}
......@@ -277,7 +277,7 @@ class FusedGemmEpilogueGradOp : public framework::OperatorWithKernel {
}
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const {
const framework::ExecutionContext& ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");
return phi::KernelKey(data_type, ctx.GetPlace());
}
......
......@@ -35,7 +35,7 @@ class ResNetUnitOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const {
void InferShape(framework::InferShapeContext* ctx) const override {
// Check input
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ResNetUnitOp");
OP_INOUT_CHECK(
......@@ -201,7 +201,7 @@ class ResNetUnitOp : public framework::OperatorWithKernel {
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const {
const framework::ExecutionContext& ctx) const override {
auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
// By default, the type of the scale, bias, mean,
// and var tensors should be float when input tensor's dtype is float16.
......@@ -223,7 +223,7 @@ class ResNetUnitOp : public framework::OperatorWithKernel {
class ResNetUnitOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "The input 1 tensor");
AddInput("FilterX", "Filter tensor of input 1");
AddInput("ScaleX", "Scale tensor of input 1 used in batchnorm");
......@@ -283,7 +283,7 @@ class ResNetUnitGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const {
void InferShape(framework::InferShapeContext* ctx) const override {
// check input
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ResNetUnitGradOp");
OP_INOUT_CHECK(
......@@ -390,7 +390,7 @@ class ResNetUnitGradOp : public framework::OperatorWithKernel {
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const {
const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar(framework::GradVarName("Y")),
platform::errors::NotFound(
......
......@@ -21,7 +21,7 @@ class YoloBoxHeadOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const {
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "yolo_box_head");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "yolo_box_head");
ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
......@@ -30,7 +30,7 @@ class YoloBoxHeadOp : public framework::OperatorWithKernel {
class YoloBoxHeadOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "The input tensor");
AddAttr<std::vector<int>>("anchors",
"The anchor width and height, "
......
......@@ -21,7 +21,7 @@ class YoloBoxPostOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const {
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Boxes0"), "Input", "Boxes0", "yolo_box_post");
OP_INOUT_CHECK(ctx->HasInput("Boxes1"), "Input", "Boxes1", "yolo_box_post");
OP_INOUT_CHECK(ctx->HasInput("Boxes2"), "Input", "Boxes2", "yolo_box_post");
......@@ -37,7 +37,7 @@ class YoloBoxPostOp : public framework::OperatorWithKernel {
class YoloBoxPostOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("Boxes0", "The Boxes0 tensor");
AddInput("Boxes1", "The Boxes1 tensor");
AddInput("Boxes2", "The Boxes2 tensor");
......
......@@ -38,7 +38,7 @@ class MarkerOp : public framework::OperatorWithKernel {
class MarkerOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddAttr<std::string>("marker_role",
"(string, default forward)forward or backward,"
" mark different stages of porcess.")
......
......@@ -32,7 +32,7 @@ class MulOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const {
const framework::ExecutionContext& ctx) const override {
auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(input_data_type, ctx.GetPlace());
}
......@@ -104,7 +104,7 @@ class MulGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const {
const framework::ExecutionContext& ctx) const override {
auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(input_data_type, ctx.GetPlace());
}
......
......@@ -33,7 +33,7 @@ class NopOp : public framework::OperatorWithKernel {
class NopOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) The input tensor of nop op.").AsDuplicable();
AddOutput("Out", "(Tensor) The output tensor of nop op.").AsDuplicable();
AddComment(R"DOC(
......
......@@ -40,7 +40,7 @@ class Pow2DecayWithLinearWarmupOp : public framework::OperatorWithKernel {
class Pow2DecayWithLinearWarmupOpMaker
: public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("LearningRate", "(Tensor) The input learning rate Tensor.");
AddInput("Step", "(Tensor) The input global step Tensor.");
AddOutput("LearningRateOut",
......
......@@ -46,7 +46,7 @@ class FetchBarrierOp : public framework::OperatorBase {
class FetchBarrierOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Any) Dummy inputs, used for control dependency")
.AsDispensable()
.AsDuplicable();
......
......@@ -177,7 +177,7 @@ void HeterListenAndServOp::RunImpl(const framework::Scope &scope,
class HeterListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable();
AddComment(
R"DOC(" + "HeterListenAndServ operator" + "\n" + "This operator" +
......
......@@ -52,7 +52,7 @@ class ListenAndServOp : public framework::OperatorBase {
class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable();
AddComment(R"DOC(" + "ListenAndServ operator" + "\n" + "This operator" +
" will start a RPC server which can receive variables from send_op and send" +
......
......@@ -69,7 +69,7 @@ class SendAndRecvOp : public framework::OperatorWithKernel {
class SendAndRecvOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "Tensor Input variable to be sent").AsDuplicable();
AddOutput("Out", "Tensor Output varibale to be recv").AsDuplicable();
AddAttr<std::string>("message_name", "");
......
......@@ -47,7 +47,7 @@ class SendBarrierOp : public framework::OperatorBase {
class SendBarrierOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Any) Dummy inputs, used for control dependency")
.AsDuplicable();
AddOutput("Out", "(Any) Dummy outputs, used for control dependency")
......
......@@ -75,7 +75,7 @@ class SendOp : public framework::OperatorBase {
class SendOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X", "(Tensor, SelectedRows) Input variables to be sent")
.AsDuplicable();
AddOutput("Out", "(Any) Dummy outputs, used for control dependency")
......
......@@ -94,8 +94,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(ReduceMeanGradNoNeedBufferVarInferer, "X");
class __reduce_meanMaker__ : public ops::ReduceBaseOpMaker {
protected:
virtual std::string GetName() const { return "reduce_mean"; }
virtual std::string GetOpType() const { return "Reduce reduce_mean"; }
std::string GetName() const override { return "reduce_mean"; }
std::string GetOpType() const override { return "Reduce reduce_mean"; }
};
DECLARE_INFER_SHAPE_FUNCTOR(
......
......@@ -24,7 +24,7 @@ namespace paddle {
namespace operators {
class TDMChildOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X",
"X(Tensor), dtype support int32/int64, X variable is the "
"node id of TDM-Tree");
......
......@@ -25,7 +25,7 @@ namespace operators {
class TDMSamplerOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
void Make() override {
AddInput("X",
"X(Tensor), Input variable which"
"mapping the leaf node idx of tdm tree,"
......
......@@ -89,7 +89,7 @@ class PyVariableWrapperHook : public imperative::VariableWrapperHook {
Py_INCREF(py_func_);
}
~PyVariableWrapperHook() {
~PyVariableWrapperHook() override {
py::gil_scoped_acquire gil;
Py_DECREF(py_func_);
}
......
......@@ -45,7 +45,7 @@ class IRPrinting : public PassInstrumentation {
explicit IRPrinting(std::unique_ptr<PassManager::IRPrinterOption> option)
: option_(std::move(option)) {}
~IRPrinting() = default;
~IRPrinting() override = default;
void RunBeforePass(Pass *pass, Operation *op) override {
if (option_->print_on_change()) {
......
......@@ -51,7 +51,7 @@ class Timer {
class PassTimer : public PassInstrumentation {
public:
explicit PassTimer(bool print_module) : print_module_(print_module) {}
~PassTimer() = default;
~PassTimer() override = default;
void RunBeforePipeline(ir::Operation* op) override {
pipeline_timers_[op] = Timer();
......
......@@ -323,7 +323,7 @@ class DeviceTracerImpl : public DeviceTracer {
#endif
}
void AddAnnotation(uint32_t id, Event *event) {
void AddAnnotation(uint32_t id, Event *event) override {
#ifdef PADDLE_WITH_SW
std::forward_list<std::pair<uint32_t, Event *>> *local_correlations_pairs =
nullptr;
......@@ -339,7 +339,8 @@ class DeviceTracerImpl : public DeviceTracer {
local_correlations_pairs->push_front(std::make_pair(id, event));
}
void AddAnnotations(const std::map<uint64_t, ThreadEvents> &thr_events) {
void AddAnnotations(
const std::map<uint64_t, ThreadEvents> &thr_events) override {
for (auto &tmp : active_kind_records_) {
for (const ActiveKindRecord &r : tmp) {
auto iter = thr_events.find(r.thread_id);
......@@ -384,7 +385,7 @@ class DeviceTracerImpl : public DeviceTracer {
uint64_t start_ns,
uint64_t end_ns,
int64_t device_id,
uint64_t thread_id) {
uint64_t thread_id) override {
if (anno.empty()) {
VLOG(1) << "Empty timeline annotation.";
return;
......@@ -409,7 +410,7 @@ class DeviceTracerImpl : public DeviceTracer {
int64_t device_id,
int64_t stream_id,
uint32_t correlation_id,
uint64_t bytes) {
uint64_t bytes) override {
// 0 means timestamp information could not be collected for the kernel.
if (start_ns == 0 || end_ns == 0 || start_ns == end_ns) {
VLOG(3) << name << " cannot be traced";
......@@ -427,7 +428,7 @@ class DeviceTracerImpl : public DeviceTracer {
const Place &place,
const std::string &alloc_in,
const std::string &free_in,
uint64_t thread_id) {
uint64_t thread_id) override {
if (0 == start_ns || 0 == end_ns) {
VLOG(3) << alloc_in << ", " << free_in << " Cannot be traced.";
return;
......@@ -452,7 +453,7 @@ class DeviceTracerImpl : public DeviceTracer {
uint64_t end_ns,
int64_t device_id,
uint64_t thread_id,
uint32_t correlation_id) {
uint32_t correlation_id) override {
if (anno.empty()) {
VLOG(1) << "Empty timeline annotation.";
return;
......@@ -478,7 +479,7 @@ class DeviceTracerImpl : public DeviceTracer {
uint64_t end,
int64_t device_id,
int64_t stream_id,
uint32_t correlation_id) {
uint32_t correlation_id) override {
// 0 means timestamp information could not be collected for the kernel.
if (start == 0 || end == 0 || start == end) {
VLOG(3) << correlation_id << " cannot be traced";
......@@ -490,12 +491,12 @@ class DeviceTracerImpl : public DeviceTracer {
KernelRecord{name, start, end, device_id, stream_id, correlation_id});
}
bool IsEnabled() {
bool IsEnabled() override {
std::lock_guard<std::mutex> l(trace_mu_);
return enabled_;
}
void Enable() {
void Enable() override {
std::lock_guard<std::mutex> l(trace_mu_);
if (enabled_) {
return;
......@@ -544,7 +545,7 @@ class DeviceTracerImpl : public DeviceTracer {
enabled_ = true;
}
void Reset() {
void Reset() override {
#ifdef PADDLE_WITH_CUPTI
CUPTI_CALL(
dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED));
......@@ -559,7 +560,7 @@ class DeviceTracerImpl : public DeviceTracer {
for (auto &tmp : active_kind_records_) tmp.clear();
}
void GenEventKernelCudaElapsedTime() {
void GenEventKernelCudaElapsedTime() override {
#ifdef PADDLE_WITH_CUPTI
if (correlations_.empty())
for (auto &tmp : correlations_pairs)
......@@ -591,7 +592,7 @@ class DeviceTracerImpl : public DeviceTracer {
#endif
}
proto::Profile GenProfile(const std::string &profile_path) {
proto::Profile GenProfile(const std::string &profile_path) override {
proto::Profile profile_pb = this->GetProfile();
std::ofstream profile_f;
profile_f.open(profile_path,
......@@ -601,7 +602,7 @@ class DeviceTracerImpl : public DeviceTracer {
return profile_pb;
}
proto::Profile GetProfile() {
proto::Profile GetProfile() override {
int miss = 0, find = 0;
std::lock_guard<std::mutex> l(trace_mu_);
proto::Profile profile_pb;
......@@ -711,7 +712,7 @@ class DeviceTracerImpl : public DeviceTracer {
return profile_pb;
}
void Disable() {
void Disable() override {
#ifdef PADDLE_WITH_CUPTI
// flush might cause additional calls to DeviceTracker.
CUPTI_CALL(
......