Unverified commit c5fe109b, authored by Wang Xin, committed by GitHub

[CodeStyle] fix macos inconsistent-missing-override warnings and add -Werror (#47264)

* fix macos inconsistent-missing-override warnings

* fix inconsistent-missing-override error in test
Parent 31f57f29
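For context, the Clang diagnostic this commit addresses is -Winconsistent-missing-override: it fires when, within a single class, some overriding member functions are marked `override` while others are not. Below is a minimal sketch of the pattern; the class and method names echo the first GradNode hunk in this diff, but the bodies are illustrative, not Paddle's actual code.

```cpp
#include <string>

struct GradNodeBase {
  virtual ~GradNodeBase() = default;
  virtual void ClearTensorWrappers() {}
  virtual std::string name() { return "GradNodeBase"; }
};

struct GradNodeAccumulation : GradNodeBase {
  // Marked override, so this class has opted into the keyword.
  void ClearTensorWrappers() override {}

  // Also overrides a base method, but omits `override`. Because a
  // sibling member above does use the keyword, clang reports
  // -Winconsistent-missing-override here. The fix applied throughout
  // this commit is simply to add the missing keyword:
  //   std::string name() override { return "GradNodeAccumulation"; }
  std::string name() { return "GradNodeAccumulation"; }
};
```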
@@ -214,7 +214,8 @@ if(APPLE)
CACHE STRING "Build architectures for OSX" FORCE)
endif()
# On Mac OS X register class specifier is deprecated and will cause warning error on latest clang 10.0
-set(COMMON_FLAGS -Wno-deprecated-register -Werror=format)
+set(COMMON_FLAGS -Wno-deprecated-register -Werror=format
+    -Werror=inconsistent-missing-override)
endif()
if(WITH_HETERPS AND WITH_PSLIB)
......
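A note on the flag form used above: `-Werror=inconsistent-missing-override` promotes only that one diagnostic to a hard error, unlike a blanket `-Werror`. Other warnings in COMMON_FLAGS still compile as warnings, which keeps the change low-risk while preventing this class of regression on macOS/clang builds.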
@@ -46,7 +46,7 @@ class GradNodeAccumulation : public GradNodeBase {
void ClearTensorWrappers() override { VLOG(5) << "Do nothing here now"; }
-std::string name() { return "GradNodeAccumulation"; }
+std::string name() override { return "GradNodeAccumulation"; }
/**
* Register ReduceHook
......
@@ -46,7 +46,7 @@ class RunCustomOpNode : public GradNodeBase {
bool is_new_grad = false) // NOLINT
override;
-std::string name() {
+std::string name() override {
return paddle::string::Sprintf("RunCustomOpNode: %s_grad", op_type_);
}
@@ -116,7 +116,7 @@ class RunCustomOpDoubleGradNode : public GradNodeBase {
bool is_new_grad = false) // NOLINT
override;
-std::string name() {
+std::string name() override {
return paddle::string::Sprintf("RunCustomOpDoubleGradNode: %s_grad_grad",
op_type_);
}
......
@@ -59,9 +59,9 @@ class CompatMetaTensor : public phi::MetaTensor {
bool initialized() const override { return initialized_; };
-bool is_selected_rows() const;
-bool is_tensor_array() const;
-bool is_dense() const;
+bool is_selected_rows() const override;
+bool is_tensor_array() const override;
+bool is_dense() const override;
operator unspecified_bool_type() const override {
return initialized_ ? unspecified_bool_true : 0;
......
@@ -68,12 +68,12 @@ class RuntimeInferVarTypeContext : public framework::InferVarTypeContext {
return (it != outputs_.end() && it->second.size() > 0);
}
-size_t InputSize(const std::string& name) const {
+size_t InputSize(const std::string& name) const override {
return inputs_.at(name).size();
}
const std::string& InputVarName(const std::string& name,
-const int index = 0) const {
+const int index = 0) const override {
return GetNameFromVar(inputs_.at(name)[index]);
}
......
@@ -143,13 +143,13 @@ class AnalysisPredictor : public PaddlePredictor {
///
/// \return input names
///
-std::vector<std::string> GetInputNames();
+std::vector<std::string> GetInputNames() override;
///
/// \brief Get the output names
///
/// \return output names
///
-std::vector<std::string> GetOutputNames();
+std::vector<std::string> GetOutputNames() override;
///
/// \brief Get the Input Tensor object
@@ -227,7 +227,7 @@ class AnalysisPredictor : public PaddlePredictor {
/// \brief Clear the intermediate tensors of the predictor
///
///
-void ClearIntermediateTensor();
+void ClearIntermediateTensor() override;
///
/// \brief Release all tmp tensor to compress the size of the memory pool.
......
@@ -33,7 +33,7 @@ class RecordedAllocator : public Allocator {
return new Allocation(malloc(size), size, platform::CPUPlace());
}
-void FreeImpl(phi::Allocation *allocation) {
+void FreeImpl(phi::Allocation *allocation) override {
allocated_size_ -= allocation->size();
free(allocation->ptr());
delete allocation;
@@ -88,7 +88,7 @@ class LimitedResourceAllocator : public Allocator {
return new Allocation(malloc(size), size, platform::CPUPlace());
}
-void FreeImpl(phi::Allocation *allocation) {
+void FreeImpl(phi::Allocation *allocation) override {
allocated_size_ -= allocation->size();
free(allocation->ptr());
delete allocation;
......
@@ -346,14 +346,14 @@ class CrossEntropyGradientOp2 : public CrossEntropyGradientOpBase {
}
protected:
-virtual framework::DDim GetXDim(framework::InferShapeContext* ctx) const {
+framework::DDim GetXDim(framework::InferShapeContext* ctx) const override {
auto x_shape = ctx->GetInputDim("XShape");
return framework::DDim(x_shape.Get(), x_shape.size() - 1);
}
-virtual const char* VarNameWithXLoD() const { return "XShape"; }
+const char* VarNameWithXLoD() const override { return "XShape"; }
-virtual bool IsSoftLabel(framework::InferShapeContext* ctx) const {
+bool IsSoftLabel(framework::InferShapeContext* ctx) const override {
return false;
}
};
......
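Note that in the CrossEntropyGradientOp2 hunk above, the redundant `virtual` specifier is dropped as `override` is added. `override` is only valid on a function that overrides a virtual base member, so the function remains virtual either way; the rewritten declarations state the intent (override, not introduce) unambiguously.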
@@ -41,7 +41,7 @@ class DecodeJpegOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
if (var_name == "X") {
return expected_kernel_type;
}
......
@@ -51,7 +51,7 @@ class ElementwiseDivOpDoubleGrad : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
......
@@ -38,7 +38,7 @@ class ElementwiseMulOp : public ElementwiseOp {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
......
@@ -357,7 +357,7 @@ class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
@@ -409,7 +409,7 @@ class ElementwiseOpDoubleGradWithoutDXDY
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
@@ -461,7 +461,7 @@ class ElementwiseOpTripleGrad : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
......
@@ -73,7 +73,7 @@ class InplaceABNGradOp : public paddle::operators::BatchNormGradOp {
public:
using paddle::operators::BatchNormGradOp::BatchNormGradOp;
-void InferShape(framework::InferShapeContext* ctx) const {
+void InferShape(framework::InferShapeContext* ctx) const override {
// check input
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "InplaceABNGrad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")),
......
@@ -60,7 +60,7 @@ class SGDOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (var_name == "LearningRate") {
return framework::OpKernelType(
framework::TransToProtoVarType(tensor.dtype()),
......
@@ -718,7 +718,7 @@ class Pad2dOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
#ifdef PADDLE_WITH_MKLDNN
if ((expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
(tensor.layout() != phi::DataLayout::kMKLDNN)) {
......