Unverified commit cd0f1523, authored by Ruibin Cheung, committed by GitHub

[clang-tidy] enable modernize-use-override (#55491)

Parent: 2d98758c
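This commit enables the clang-tidy check `modernize-use-override` (previously disabled in the repository's check list) and applies its fix-its across the codebase: every member function that overrides a base-class virtual function is marked `override`, and any now-redundant `virtual` keyword is removed. As a minimal sketch of what the check does (the class names below are illustrative, not taken from this diff):

```cpp
#include <string>

struct Base {
  virtual ~Base() = default;
  virtual std::string Name() const { return "base"; }
};

// Before the check: the override is implicit, so a small signature typo
// (e.g. a missing const) would silently declare a new function instead.
struct DerivedBefore : Base {
  virtual std::string Name() const { return "derived"; }
};

// After modernize-use-override: intent is explicit and the compiler
// verifies the signature against Base.
struct DerivedAfter : Base {
  std::string Name() const override { return "derived"; }
};
```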
@@ -185,7 +185,7 @@ Checks: '
 -modernize-use-equals-delete,
 -modernize-use-noexcept,
 -modernize-use-nullptr,
--modernize-use-override,
+modernize-use-override,
 -modernize-use-transparent-functors,
 -modernize-use-uncaught-exceptions,
 -performance-faster-string-find,
...
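In a `.clang-tidy` `Checks:` list, a leading `-` in front of a name disables that check, so deleting the dash here is what turns `modernize-use-override` on for the whole repository. The mechanical edits in the rest of this commit are the kind of rewrite clang-tidy's fix mode produces for this check; a hedged example invocation (the file path and compile flags are placeholders) is: clang-tidy -checks='-*,modernize-use-override' -fix path/to/file.cc -- -std=c++17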
@@ -69,7 +69,7 @@ double GetFuseParameterMemorySize() { return FLAGS_fuse_parameter_memory_size; }
 class CoalesceGradTensorPass : public ir::Pass {
  protected:
-  void ApplyImpl(ir::Graph *graph) const {
+  void ApplyImpl(ir::Graph *graph) const override {
     if (Get<size_t>(details::kNRanks) <= 1) {
       VLOG(6) << "The number of place is" << Get<size_t>(details::kNRanks)
               << ", there doesn't need apply FuseAllReduceOpPass.";
...
@@ -30,9 +30,9 @@ class Node;
 class FuseAdamOpPass : public FuseOptimizerOpPass {
  private:
-  const std::string GetOpType() const { return "adam"; }
-  const std::vector<std::string> GetAuxiliaryVarNames() const {
+  const std::string GetOpType() const override { return "adam"; }
+  const std::vector<std::string> GetAuxiliaryVarNames() const override {
     return {"Moment1", "Moment2", "Beta1Pow", "Beta2Pow"};
   }
@@ -41,7 +41,7 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
           &aux_var_set,
       const std::unordered_map<std::string, std::string> &fused_vars_name,
       const std::vector<ir::Node *> &adam_ops,
-      ir::Graph *graph) const {
+      ir::Graph *graph) const override {
     auto fused_adam_node =
         FuseAdamOps(aux_var_set, fused_vars_name, adam_ops, graph);
     return fused_adam_node;
...
@@ -29,18 +29,18 @@ class Node;
 class FuseMomentumOpPass : public FuseOptimizerOpPass {
  private:
-  virtual const std::string GetOpType() const { return "momentum"; }
-  virtual const std::vector<std::string> GetAuxiliaryVarNames() const {
+  const std::string GetOpType() const override { return "momentum"; }
+  const std::vector<std::string> GetAuxiliaryVarNames() const override {
     return {"Velocity"};
   }
   // Fuse Momentum Ops
-  virtual ir::Node *FuseOptimizerOps(
+  ir::Node *FuseOptimizerOps(
       const std::unordered_map<std::string, std::vector<std::string>> &vars_set,
       const std::unordered_map<std::string, std::string> &fused_vars_name,
       const std::vector<ir::Node *> &momentum_ops,
-      ir::Graph *graph) const {
+      ir::Graph *graph) const override {
     PADDLE_ENFORCE_GT(
         momentum_ops.size(),
         static_cast<size_t>(0),
...
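One wrinkle the momentum-pass hunk above shows: once `override` is added, a leading `virtual` is redundant (an overriding function is virtual whether or not it is spelled out), so the check deletes it in the same rewrite. A minimal sketch with hypothetical names, not Paddle's actual hierarchy:

```cpp
#include <string>

struct OptimizerPassBase {
  virtual ~OptimizerPassBase() = default;
  virtual const std::string GetOpType() const = 0;
};

struct MomentumLikePass : OptimizerPassBase {
  // was: virtual const std::string GetOpType() const { return "momentum"; }
  const std::string GetOpType() const override { return "momentum"; }
};
```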
@@ -28,18 +28,18 @@ class Node;
 class FuseSgdOpPass : public FuseOptimizerOpPass {
  private:
-  virtual const std::string GetOpType() const { return "sgd"; }
-  virtual const std::vector<std::string> GetAuxiliaryVarNames() const {
+  const std::string GetOpType() const override { return "sgd"; }
+  const std::vector<std::string> GetAuxiliaryVarNames() const override {
     return {};
   }
   // Fuse Sgd Ops
-  virtual ir::Node *FuseOptimizerOps(
+  ir::Node *FuseOptimizerOps(
       const std::unordered_map<std::string, std::vector<std::string>> &vars_set,
       const std::unordered_map<std::string, std::string> &fused_vars_name,
       const std::vector<ir::Node *> &sgd_ops,
-      ir::Graph *graph) const {
+      ir::Graph *graph) const override {
     PADDLE_ENFORCE_GT(
         sgd_ops.size(),
         static_cast<size_t>(0),
...
@@ -53,7 +53,7 @@ class WorkQueueImpl : public WorkQueue {
                      options_.always_spinning);
   }
-  virtual ~WorkQueueImpl() {
+  ~WorkQueueImpl() override {
     delete queue_;
     if (tracker_ != nullptr) {
       tracker_->~TaskTracker();
@@ -94,7 +94,7 @@ class WorkQueueGroupImpl : public WorkQueueGroup {
   explicit WorkQueueGroupImpl(
       const std::vector<WorkQueueOptions>& queue_options);
-  ~WorkQueueGroupImpl();
+  ~WorkQueueGroupImpl() override;
   void AddTask(size_t queue_idx, std::function<void()> fn) override;
...
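Destructors get the same treatment: a derived class's destructor overrides the base's virtual destructor, so `virtual ~X() {...}` becomes `~X() override {...}` and a bare declaration `~X();` becomes `~X() override;`, as in the two WorkQueue hunks above. A minimal sketch (names are hypothetical); note that clang-tidy also offers a modernize-use-override.IgnoreDestructors option to skip this case, which this commit evidently does not use:

```cpp
struct Queue {
  virtual ~Queue() = default;
};

struct BoundedQueue : Queue {
  // was: virtual ~BoundedQueue() { /* release resources */ }
  ~BoundedQueue() override { /* release resources */ }  // override implies virtual
};
```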
@@ -40,7 +40,7 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
         platform::errors::InvalidArgument("Op proto cannot be nullptr."));
   }
-  ~KernelArgsNameMakerByOpProto() {}
+  ~KernelArgsNameMakerByOpProto() override {}
   const paddle::small_vector<const char*>& GetInputArgsNames() override;
   const paddle::small_vector<const char*>& GetOutputArgsNames() override;
...
@@ -127,7 +127,7 @@ class CUDAGraphAllocator
       : underlying_allocator_(allocator) {}
  public:
-  ~CUDAGraphAllocator() {}
+  ~CUDAGraphAllocator() override {}
   static std::shared_ptr<Allocator> Create(
       const std::shared_ptr<Allocator>& allocator) {
@@ -135,14 +135,14 @@ class CUDAGraphAllocator
   }
  protected:
-  phi::Allocation* AllocateImpl(size_t size) {
+  phi::Allocation* AllocateImpl(size_t size) override {
     VLOG(10) << "Allocate " << size << " for CUDA Graph";
     return new PrivateAllocation(this,
                                  static_unique_ptr_cast<Allocation>(
                                      underlying_allocator_->Allocate(size)));
   }
-  void FreeImpl(phi::Allocation* allocation) {
+  void FreeImpl(phi::Allocation* allocation) override {
     VLOG(10) << "delete for CUDA Graph";
     delete allocation;
   }
...
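The allocator hunk shows the check reaching protected implementation hooks as well: `AllocateImpl`/`FreeImpl` are virtual customization points of the `Allocator` base, and they get `override` just like public entry points. A minimal sketch of that hook pattern (names are hypothetical, not phi's real interface):

```cpp
#include <cstddef>

class AllocatorBase {
 public:
  virtual ~AllocatorBase() = default;
  void* Allocate(std::size_t n) { return AllocateImpl(n); }  // public non-virtual entry

 protected:
  virtual void* AllocateImpl(std::size_t n) = 0;  // protected virtual hook
};

class PoolAllocator : public AllocatorBase {
 protected:
  // was: virtual void* AllocateImpl(std::size_t n) { ... }
  void* AllocateImpl(std::size_t n) override { return ::operator new(n); }
};
```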
@@ -36,7 +36,7 @@ class AllReduceDelOp : public framework::OperatorWithKernel {
 class AllReduceDelOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor), tensor to be allreduced.");
     AddOutput("Out", "(Tensor) the result of allreduced.");
     AddAttr<int>("reduce_type", "(int) determine the reduce type.")
...
@@ -45,7 +45,7 @@ class AllToAllBaseOp : public framework::OperatorWithKernel {
 class AllToAllBaseOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor send.");
     AddOutput("Out", "(Tensor) the result of alltoall.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
...
@@ -27,7 +27,7 @@ class BarrierOp : public framework::OperatorWithKernel {
 class BarrierOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Input data (only used in CUDAKernel).");
     AddOutput("Out", "(Tensor) Output data (only used in CUDAKernel).");
     AddAttr<int>("ring_id", "(int default 0) communication ring id.")
...
@@ -44,7 +44,7 @@ class CAllGatherOp : public framework::OperatorWithKernel {
 class CAllGatherOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be allgather");
     AddOutput("Out", "(Tensor) the allgather result");
     AddAttr<int>("ring_id", "(int default 0) communication ring id.")
...
@@ -35,7 +35,7 @@ class CBroadcastOp : public framework::OperatorWithKernel {
 class CBroadcastOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be broadcasted.");
     AddOutput("Out", "(Tensor) the result of broadcast.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
...
@@ -38,7 +38,7 @@ namespace operators {
 class CCommInitAllInferShape : public framework::InferShapeBase {
  public:
-  ~CCommInitAllInferShape() {}
+  ~CCommInitAllInferShape() override {}
   void operator()(framework::InferShapeContext* ctx) const override{};
 };
...
@@ -38,7 +38,7 @@ namespace operators {
 class CCommInitMultiTrainerInferShape : public framework::InferShapeBase {
  public:
-  ~CCommInitMultiTrainerInferShape() {}
+  ~CCommInitMultiTrainerInferShape() override {}
   void operator()(framework::InferShapeContext* ctx) const override{};
 };
...
@@ -81,7 +81,7 @@ class CConcatOpGradMaker : public framework::SingleGradOpMaker<T> {
 class CConcatOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be concated.");
     AddOutput("Out", "(Tensor) the result of concat.");
     AddAttr<int>("rank", "(int default 0) rank id.").SetDefault(0);
...
@@ -44,7 +44,7 @@ class CIdentityOp : public framework::OperatorWithKernel {
 class CIdentityOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) identity tensor.");
     AddOutput("Out", "(Tensor) identity tensor.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
...
@@ -42,7 +42,7 @@ class CReduceScatterOp : public framework::OperatorWithKernel {
 class CReduceScatterOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be allgather");
     AddOutput("Out", "(Tensor) the allgather result");
     AddAttr<int>("ring_id", "(int default 0) communication ring id.")
...
@@ -61,7 +61,7 @@ class CScatterOp : public framework::OperatorWithKernel {
 class CScatterOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be broadcasted.");
     AddOutput("Out", "(Tensor) the result of broadcast.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
...
@@ -83,7 +83,7 @@ class CSoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
 class CSoftmaxWithCrossEntropyOpMaker
     : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("Logits",
              "(Tensor, default: Tensor<float>), The input tensor of unscaled "
              "log probabilities, whose dimension :attr:`axis` should be scaled "
...
@@ -89,7 +89,7 @@ class CSplitOpGradMaker : public framework::SingleGradOpMaker<T> {
 class CSplitOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be split.");
     AddOutput("Out", "(Tensor) the result of split.");
     AddAttr<int>("rank", "(int default 0) rank id.").SetDefault(0);
...
@@ -18,7 +18,7 @@ namespace operators {
 class CSyncCalcStreamOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Dependency of the variable need to sync");
     AddOutput("Out", "(Tensor) Dependency of the variable need to sync");
     AddComment(R"DOC(
...
@@ -31,7 +31,7 @@ class CSyncCommStreamOp : public framework::OperatorWithKernel {
 class CSyncCommStreamOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Dependency of the variable need to sync")
         .AsDuplicable();
     AddOutput("Out", "(Tensor) Dependency of the variable need to sync")
...
@@ -73,7 +73,7 @@ class CWaitCommOp : public framework::OperatorBase {
 class CWaitCommOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Dependency of the variable need to sync")
         .AsDuplicable();
     AddOutput("Out", "(Tensor) Dependency of the variable need to sync")
...
@@ -74,7 +74,7 @@ class CWaitComputeOp : public framework::OperatorBase {
 class CWaitComputeOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Dependency of the variable need to sync")
         .AsDuplicable();
     AddOutput("Out", "(Tensor) Dependency of the variable need to sync")
...
@@ -58,7 +58,7 @@ class GlobalGatherOp : public framework::OperatorWithKernel {
 class GlobalGatherOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor send.");
     AddInput("local_count",
              "(Tensor) Tensor which has n_expert * world_size elements that "
...
@@ -61,7 +61,7 @@ class GlobalScatterOp : public framework::OperatorWithKernel {
 class GlobalScatterOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor send.");
     AddInput("local_count",
              "(Tensor) Tensor which has n_expert * world_size elements that "
...
@@ -37,7 +37,7 @@ class MpAllReduceSumOp : public framework::OperatorWithKernel {
 class MpAllReduceSumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor), tensor to be allreduced in model parallel.");
     AddOutput("Out", "(Tensor) the allreduced result in model parallel.");
     AddAttr<int>("ring_id", "(int default 0) communication ring id.")
...
@@ -45,7 +45,7 @@ class PartialAllGatherOp : public framework::OperatorWithKernel {
 class PartialAllGatherOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be partial allgather");
     AddOutput("Out", "(Tensor) the allgather result");
     AddAttr<int>("ring_id", "(int default 0) communication ring id.")
...
@@ -91,7 +91,7 @@ class PartialRecvOp : public framework::OperatorWithKernel {
 class PartialRecvOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddOutput("Out", "(Tensor) tensor to receive.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
         .SetDefault(0);
...
@@ -60,7 +60,7 @@ class PartialSendOp : public framework::OperatorWithKernel {
 class PartialSendMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be sent.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
         .SetDefault(0);
...
@@ -80,7 +80,7 @@ class RecvOpV2 : public framework::OperatorWithKernel {
 class RecvOpV2Maker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddOutput("Out", "(Tensor) tensor to receive.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
         .SetDefault(0);
...
@@ -56,7 +56,7 @@ class SendOpV2 : public framework::OperatorWithKernel {
 class SendOpV2Maker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) tensor to be sent.");
     AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
         .SetDefault(0);
...
@@ -127,7 +127,7 @@ class FusedGemmEpilogueOp : public framework::OperatorWithKernel {
   }
   phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     return phi::KernelKey(data_type, ctx.GetPlace());
   }
@@ -277,7 +277,7 @@ class FusedGemmEpilogueGradOp : public framework::OperatorWithKernel {
   }
   phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");
     return phi::KernelKey(data_type, ctx.GetPlace());
   }
...
@@ -35,7 +35,7 @@ class ResNetUnitOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-  void InferShape(framework::InferShapeContext* ctx) const {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     // Check input
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ResNetUnitOp");
     OP_INOUT_CHECK(
@@ -201,7 +201,7 @@ class ResNetUnitOp : public framework::OperatorWithKernel {
  protected:
   phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     // By default, the type of the scale, bias, mean,
     // and var tensors should be float when input tensor's dtype is float16.
@@ -223,7 +223,7 @@ class ResNetUnitOp : public framework::OperatorWithKernel {
 class ResNetUnitOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "The input 1 tensor");
     AddInput("FilterX", "Filter tensor of input 1");
     AddInput("ScaleX", "Scale tensor of input 1 used in batchnorm");
@@ -283,7 +283,7 @@ class ResNetUnitGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-  void InferShape(framework::InferShapeContext* ctx) const {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     // check input
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ResNetUnitGradOp");
     OP_INOUT_CHECK(
@@ -390,7 +390,7 @@ class ResNetUnitGradOp : public framework::OperatorWithKernel {
  protected:
   phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(
         ctx.InputVar(framework::GradVarName("Y")),
         platform::errors::NotFound(
...
@@ -21,7 +21,7 @@ class YoloBoxHeadOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-  void InferShape(framework::InferShapeContext* ctx) const {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "yolo_box_head");
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "yolo_box_head");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
@@ -30,7 +30,7 @@ class YoloBoxHeadOp : public framework::OperatorWithKernel {
 class YoloBoxHeadOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "The input tensor");
     AddAttr<std::vector<int>>("anchors",
                               "The anchor width and height, "
...
@@ -21,7 +21,7 @@ class YoloBoxPostOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-  void InferShape(framework::InferShapeContext* ctx) const {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("Boxes0"), "Input", "Boxes0", "yolo_box_post");
     OP_INOUT_CHECK(ctx->HasInput("Boxes1"), "Input", "Boxes1", "yolo_box_post");
     OP_INOUT_CHECK(ctx->HasInput("Boxes2"), "Input", "Boxes2", "yolo_box_post");
@@ -37,7 +37,7 @@ class YoloBoxPostOp : public framework::OperatorWithKernel {
 class YoloBoxPostOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("Boxes0", "The Boxes0 tensor");
     AddInput("Boxes1", "The Boxes1 tensor");
     AddInput("Boxes2", "The Boxes2 tensor");
...
@@ -38,7 +38,7 @@ class MarkerOp : public framework::OperatorWithKernel {
 class MarkerOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddAttr<std::string>("marker_role",
                          "(string, default forward)forward or backward,"
                          " mark different stages of porcess.")
...
@@ -32,7 +32,7 @@ class MulOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     return phi::KernelKey(input_data_type, ctx.GetPlace());
   }
@@ -104,7 +104,7 @@ class MulGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     return phi::KernelKey(input_data_type, ctx.GetPlace());
   }
...
@@ -33,7 +33,7 @@ class NopOp : public framework::OperatorWithKernel {
 class NopOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) The input tensor of nop op.").AsDuplicable();
     AddOutput("Out", "(Tensor) The output tensor of nop op.").AsDuplicable();
     AddComment(R"DOC(
...
@@ -40,7 +40,7 @@ class Pow2DecayWithLinearWarmupOp : public framework::OperatorWithKernel {
 class Pow2DecayWithLinearWarmupOpMaker
     : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("LearningRate", "(Tensor) The input learning rate Tensor.");
     AddInput("Step", "(Tensor) The input global step Tensor.");
     AddOutput("LearningRateOut",
...
@@ -46,7 +46,7 @@ class FetchBarrierOp : public framework::OperatorBase {
 class FetchBarrierOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Any) Dummy inputs, used for control dependency")
         .AsDispensable()
         .AsDuplicable();
...
@@ -177,7 +177,7 @@ void HeterListenAndServOp::RunImpl(const framework::Scope &scope,
 class HeterListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable();
     AddComment(
         R"DOC(" + "HeterListenAndServ operator" + "\n" + "This operator" +
...
@@ -52,7 +52,7 @@ class ListenAndServOp : public framework::OperatorBase {
 class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable();
     AddComment(R"DOC(" + "ListenAndServ operator" + "\n" + "This operator" +
 " will start a RPC server which can receive variables from send_op and send" +
...
@@ -69,7 +69,7 @@ class SendAndRecvOp : public framework::OperatorWithKernel {
 class SendAndRecvOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "Tensor Input variable to be sent").AsDuplicable();
     AddOutput("Out", "Tensor Output varibale to be recv").AsDuplicable();
     AddAttr<std::string>("message_name", "");
...
@@ -47,7 +47,7 @@ class SendBarrierOp : public framework::OperatorBase {
 class SendBarrierOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Any) Dummy inputs, used for control dependency")
         .AsDuplicable();
     AddOutput("Out", "(Any) Dummy outputs, used for control dependency")
...
@@ -75,7 +75,7 @@ class SendOp : public framework::OperatorBase {
 class SendOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X", "(Tensor, SelectedRows) Input variables to be sent")
         .AsDuplicable();
     AddOutput("Out", "(Any) Dummy outputs, used for control dependency")
...
@@ -94,8 +94,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(ReduceMeanGradNoNeedBufferVarInferer, "X");
 class __reduce_meanMaker__ : public ops::ReduceBaseOpMaker {
  protected:
-  virtual std::string GetName() const { return "reduce_mean"; }
-  virtual std::string GetOpType() const { return "Reduce reduce_mean"; }
+  std::string GetName() const override { return "reduce_mean"; }
+  std::string GetOpType() const override { return "Reduce reduce_mean"; }
 };
 DECLARE_INFER_SHAPE_FUNCTOR(
...
@@ -24,7 +24,7 @@ namespace paddle {
 namespace operators {
 class TDMChildOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X",
              "X(Tensor), dtype support int32/int64, X variable is the "
              "node id of TDM-Tree");
...
@@ -25,7 +25,7 @@ namespace operators {
 class TDMSamplerOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  void Make() {
+  void Make() override {
     AddInput("X",
              "X(Tensor), Input variable which"
              "mapping the leaf node idx of tdm tree,"
...
@@ -89,7 +89,7 @@ class PyVariableWrapperHook : public imperative::VariableWrapperHook {
     Py_INCREF(py_func_);
   }
-  ~PyVariableWrapperHook() {
+  ~PyVariableWrapperHook() override {
     py::gil_scoped_acquire gil;
     Py_DECREF(py_func_);
   }
...
@@ -45,7 +45,7 @@ class IRPrinting : public PassInstrumentation {
   explicit IRPrinting(std::unique_ptr<PassManager::IRPrinterOption> option)
       : option_(std::move(option)) {}
-  ~IRPrinting() = default;
+  ~IRPrinting() override = default;
   void RunBeforePass(Pass *pass, Operation *op) override {
     if (option_->print_on_change()) {
...
@@ -51,7 +51,7 @@ class Timer {
 class PassTimer : public PassInstrumentation {
  public:
   explicit PassTimer(bool print_module) : print_module_(print_module) {}
-  ~PassTimer() = default;
+  ~PassTimer() override = default;
   void RunBeforePipeline(ir::Operation* op) override {
     pipeline_timers_[op] = Timer();
...
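`override` composes with `= default` as well, which is why the defaulted destructors in the two pass-instrumentation hunks above become `~IRPrinting() override = default;` and `~PassTimer() override = default;`. A one-line illustration with hypothetical names mirroring those hunks:

```cpp
struct Instrumentation {
  virtual ~Instrumentation() = default;
};

struct TimerInstrumentation : Instrumentation {
  ~TimerInstrumentation() override = default;  // defaulted and explicitly an override
};
```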
@@ -323,7 +323,7 @@ class DeviceTracerImpl : public DeviceTracer {
 #endif
   }
-  void AddAnnotation(uint32_t id, Event *event) {
+  void AddAnnotation(uint32_t id, Event *event) override {
 #ifdef PADDLE_WITH_SW
     std::forward_list<std::pair<uint32_t, Event *>> *local_correlations_pairs =
         nullptr;
@@ -339,7 +339,8 @@ class DeviceTracerImpl : public DeviceTracer {
     local_correlations_pairs->push_front(std::make_pair(id, event));
   }
-  void AddAnnotations(const std::map<uint64_t, ThreadEvents> &thr_events) {
+  void AddAnnotations(
+      const std::map<uint64_t, ThreadEvents> &thr_events) override {
     for (auto &tmp : active_kind_records_) {
       for (const ActiveKindRecord &r : tmp) {
         auto iter = thr_events.find(r.thread_id);
@@ -384,7 +385,7 @@ class DeviceTracerImpl : public DeviceTracer {
                 uint64_t start_ns,
                 uint64_t end_ns,
                 int64_t device_id,
-                uint64_t thread_id) {
+                uint64_t thread_id) override {
     if (anno.empty()) {
       VLOG(1) << "Empty timeline annotation.";
       return;
@@ -409,7 +410,7 @@ class DeviceTracerImpl : public DeviceTracer {
                 int64_t device_id,
                 int64_t stream_id,
                 uint32_t correlation_id,
-                uint64_t bytes) {
+                uint64_t bytes) override {
     // 0 means timestamp information could not be collected for the kernel.
     if (start_ns == 0 || end_ns == 0 || start_ns == end_ns) {
       VLOG(3) << name << " cannot be traced";
@@ -427,7 +428,7 @@ class DeviceTracerImpl : public DeviceTracer {
                 const Place &place,
                 const std::string &alloc_in,
                 const std::string &free_in,
-                uint64_t thread_id) {
+                uint64_t thread_id) override {
     if (0 == start_ns || 0 == end_ns) {
       VLOG(3) << alloc_in << ", " << free_in << " Cannot be traced.";
       return;
@@ -452,7 +453,7 @@ class DeviceTracerImpl : public DeviceTracer {
                 uint64_t end_ns,
                 int64_t device_id,
                 uint64_t thread_id,
-                uint32_t correlation_id) {
+                uint32_t correlation_id) override {
     if (anno.empty()) {
       VLOG(1) << "Empty timeline annotation.";
       return;
@@ -478,7 +479,7 @@ class DeviceTracerImpl : public DeviceTracer {
                 uint64_t end,
                 int64_t device_id,
                 int64_t stream_id,
-                uint32_t correlation_id) {
+                uint32_t correlation_id) override {
     // 0 means timestamp information could not be collected for the kernel.
     if (start == 0 || end == 0 || start == end) {
       VLOG(3) << correlation_id << " cannot be traced";
@@ -490,12 +491,12 @@ class DeviceTracerImpl : public DeviceTracer {
         KernelRecord{name, start, end, device_id, stream_id, correlation_id});
   }
-  bool IsEnabled() {
+  bool IsEnabled() override {
     std::lock_guard<std::mutex> l(trace_mu_);
     return enabled_;
   }
-  void Enable() {
+  void Enable() override {
     std::lock_guard<std::mutex> l(trace_mu_);
     if (enabled_) {
       return;
@@ -544,7 +545,7 @@ class DeviceTracerImpl : public DeviceTracer {
     enabled_ = true;
   }
-  void Reset() {
+  void Reset() override {
 #ifdef PADDLE_WITH_CUPTI
     CUPTI_CALL(
         dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED));
@@ -559,7 +560,7 @@ class DeviceTracerImpl : public DeviceTracer {
     for (auto &tmp : active_kind_records_) tmp.clear();
   }
-  void GenEventKernelCudaElapsedTime() {
+  void GenEventKernelCudaElapsedTime() override {
 #ifdef PADDLE_WITH_CUPTI
     if (correlations_.empty())
       for (auto &tmp : correlations_pairs)
@@ -591,7 +592,7 @@ class DeviceTracerImpl : public DeviceTracer {
 #endif
   }
-  proto::Profile GenProfile(const std::string &profile_path) {
+  proto::Profile GenProfile(const std::string &profile_path) override {
     proto::Profile profile_pb = this->GetProfile();
     std::ofstream profile_f;
     profile_f.open(profile_path,
@@ -601,7 +602,7 @@ class DeviceTracerImpl : public DeviceTracer {
     return profile_pb;
   }
-  proto::Profile GetProfile() {
+  proto::Profile GetProfile() override {
     int miss = 0, find = 0;
     std::lock_guard<std::mutex> l(trace_mu_);
     proto::Profile profile_pb;
@@ -711,7 +712,7 @@ class DeviceTracerImpl : public DeviceTracer {
     return profile_pb;
   }
-  void Disable() {
+  void Disable() override {
 #ifdef PADDLE_WITH_CUPTI
     // flush might cause additional calls to DeviceTracker.
     CUPTI_CALL(
...