未验证 提交 b02687cc 编写于 作者: G Galaxy1458 提交者: GitHub

remove some [-Wunused-parameter] (#53162)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
上级 b406a7db
......@@ -976,11 +976,11 @@ class GraphDataGenerator {
void ResetEpochFinish() { epoch_finish_ = false; }
void ClearSampleState();
void DumpWalkPath(std::string dump_path, size_t dump_rate);
// Intentionally a no-op: legacy device-key registration is disabled (the
// commented-out lines preserve the previous behavior for reference).
// UNUSED suppresses -Wunused-parameter on the deliberately ignored args.
void SetDeviceKeys(std::vector<uint64_t>* device_keys UNUSED,
                   int type UNUSED) {
  // type_to_index_[type] = h_device_keys_.size();
  // h_device_keys_.push_back(device_keys);
}
std::vector<std::shared_ptr<phi::Allocation>> SampleNeighbors(
int64_t* uniq_nodes,
int len,
......@@ -1126,7 +1126,7 @@ class DataFeed {
}
virtual ~DataFeed() {}
virtual void Init(const DataFeedDesc& data_feed_desc) = 0;
virtual bool CheckFile(const char* filename) {
virtual bool CheckFile(const char* filename UNUSED) {
PADDLE_THROW(platform::errors::Unimplemented(
"This function(CheckFile) is not implemented."));
}
......@@ -1415,13 +1415,13 @@ class InMemoryDataFeed : public DataFeed {
protected:
virtual bool ParseOneInstance(T* instance) = 0;
virtual bool ParseOneInstanceFromPipe(T* instance) = 0;
// Hooks for parsing instances through a dynamically loaded CustomParser.
// The base versions do nothing / parse zero instances; subclasses that
// support .so-based parsing override them. UNUSED suppresses
// -Wunused-parameter on the intentionally ignored arguments.
virtual void ParseOneInstanceFromSo(const char* str UNUSED,
                                    T* instance UNUSED,
                                    CustomParser* parser UNUSED) {}
virtual int ParseInstanceFromSo(int len UNUSED,
                                const char* str UNUSED,
                                std::vector<T>* instances UNUSED,
                                CustomParser* parser UNUSED) {
  return 0;  // base class parses nothing
}
virtual void PutToFeedVec(const std::vector<T>& ins_vec) = 0;
......@@ -1656,7 +1656,7 @@ struct RecordCandidate {
class RecordCandidateList {
public:
RecordCandidateList() = default;
// Copy constructor intentionally copies no state (the body is empty);
// UNUSED marks the deliberately unnamed, ignored parameter.
RecordCandidateList(const RecordCandidateList& UNUSED) {}
size_t Size() { return cur_size_; }
void ReSize(size_t length);
......@@ -1802,9 +1802,9 @@ class MultiSlotInMemoryDataFeed : public InMemoryDataFeed<Record> {
protected:
virtual bool ParseOneInstance(Record* instance);
virtual bool ParseOneInstanceFromPipe(Record* instance);
// .so-based single-instance parsing is not supported by this feed; the
// override is a deliberate no-op. UNUSED suppresses -Wunused-parameter.
virtual void ParseOneInstanceFromSo(const char* str UNUSED,
                                    Record* instance UNUSED,
                                    CustomParser* parser UNUSED) {}
virtual int ParseInstanceFromSo(int len,
const char* str,
std::vector<Record>* instances,
......@@ -1828,11 +1828,13 @@ class SlotRecordInMemoryDataFeed : public InMemoryDataFeed<SlotRecord> {
protected:
bool Start() override;
int Next() override;
// Single-instance parsing paths are unused for SlotRecord feeds: both
// overrides report failure unconditionally, and PutToFeedVec is a no-op.
// UNUSED suppresses -Wunused-parameter on the ignored arguments.
bool ParseOneInstance(SlotRecord* instance UNUSED) override { return false; }
bool ParseOneInstanceFromPipe(SlotRecord* instance UNUSED) override {
  return false;
}
// virtual void ParseOneInstanceFromSo(const char* str, T* instance,
//                                     CustomParser* parser) {}
void PutToFeedVec(const std::vector<SlotRecord>& ins_vec UNUSED) override {}
virtual void LoadIntoMemoryByCommand(void);
virtual void LoadIntoMemoryByLib(void);
......
......@@ -320,7 +320,7 @@ struct OpInfoFiller<T, kVarTypeInference> {
template <typename T>
struct OpInfoFiller<T, kShapeInference> {
void operator()(const char* op_type, OpInfo* info) const {
void operator()(const char* op_type UNUSED, OpInfo* info) const {
// Note: if fill InferShapeFN by this Filler, the infershape here
// will overwrite the op->InferShape func registered in kOperator Filler
info->infer_shape_ = [](InferShapeContext* ctx) {
......@@ -360,7 +360,7 @@ struct OpInfoFiller<T, kNoNeedBufferVarsInference> {
// A fake OpInfoFiller of void
// A fake OpInfoFiller of void: terminates the filler chain by filling
// nothing. UNUSED marks both intentionally ignored parameters.
template <>
struct OpInfoFiller<void, kUnknown> {
  void operator()(const char* op_type UNUSED, OpInfo* info UNUSED) const {}
};
} // namespace details
......
......@@ -18,7 +18,7 @@
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
......@@ -40,7 +40,7 @@ class InplaceOpInference {
class class_name final : public ::paddle::framework::InplaceOpInference { \
public: \
std::unordered_map<std::string, std::string> operator()( \
bool use_cuda) const final { \
bool use_cuda UNUSED) const final { \
return {__VA_ARGS__}; \
} \
}
......
......@@ -53,7 +53,8 @@ class operation_visitor {
: type_(type) {}
// Catch-all overload for attribute/operation type pairs that have no
// specific implementation; it always throws. UNUSED suppresses
// -Wunused-parameter on the never-read arguments.
template <typename T1, typename T2>
Attribute operator()(const T1& attr UNUSED,
                     const T2& operation UNUSED) const {
  PADDLE_THROW(platform::errors::Unimplemented("Unimplemented operand."));
}
......
......@@ -169,7 +169,7 @@ class Pass {
virtual bool SupportApplyProgramViaGraph() const { return true; }
protected:
// Default implementation: a concrete pass must override this; invoking
// the base version is an error. UNUSED marks the ignored graph argument.
virtual void ApplyImpl(Graph *graph UNUSED) const {
  PADDLE_THROW(platform::errors::Unimplemented(
      "The virtual pass called is not implemented."));
}
......
......@@ -25,6 +25,7 @@ limitations under the License. */
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/var_desc.h"
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
......@@ -150,7 +151,7 @@ class OpDesc {
const AttributeMap &GetRuntimeAttrMap() const;
std::vector<std::string> InputNames(bool with_attr_var = false) const {
std::vector<std::string> InputNames(bool with_attr_var UNUSED = false) const {
return MapKeys(inputs_);
}
std::vector<std::string> OutputNames() const { return MapKeys(outputs_); }
......
......@@ -35,6 +35,7 @@ limitations under the License. */
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
......@@ -218,9 +219,9 @@ struct OpKernelRegistrarFunctor<PlaceType, false, I, KernelTypes...> {
// Recursion terminator for the kernel-registrar functor chain (the
// at_end == true specialization): nothing left to register, so the call
// operator is a deliberate no-op. UNUSED suppresses -Wunused-parameter.
template <typename PlaceType, size_t I, typename... KernelType>
struct OpKernelRegistrarFunctor<PlaceType, true, I, KernelType...> {
  void operator()(const char* op_type UNUSED,
                  const char* library_type UNUSED,
                  int customized_type_value UNUSED) const {}
};
// User can register many kernel in one place. The data type could be
......
......@@ -44,6 +44,7 @@ limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/core/macros.h"
#include "paddle/utils/flat_hash_map.h"
namespace paddle {
......@@ -353,11 +354,11 @@ class OperatorBase {
void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }
// Default no-op hooks; operator subclasses that perform runtime infer-shape
// override these. UNUSED suppresses -Wunused-parameter on the ignored args.
virtual void SetIsRuntimeInferShape(bool x UNUSED) {}

virtual void RuntimeInferShape(const Scope& scope UNUSED,
                               const platform::Place& place UNUSED,
                               const RuntimeContext& ctx UNUSED) const {}
virtual platform::Place GetExecutionPlace(
const platform::Place& place) const {
......@@ -794,7 +795,7 @@ class OperatorWithKernel : public OperatorBase {
const phi::KernelKey& expected_kernel_type) const;
// The execution place comes from the selected kernel type, so the caller's
// suggested place is ignored (UNUSED).
platform::Place GetExecutionPlace(
    const platform::Place& platform UNUSED) const override {
  return kernel_type_->place_;
}
......
......@@ -73,7 +73,7 @@ class ReaderBase {
virtual ~ReaderBase();
protected:
// Default no-op; readers that actually produce data override this.
// UNUSED marks the ignored output argument.
virtual void ReadNextImpl(paddle::framework::LoDTensorArray* out UNUSED) {}
virtual void ShutdownImpl() {}
......
......@@ -61,7 +61,7 @@ class GradOpBaseMakerBase {
virtual std::shared_ptr<GradOpNode> operator()() const = 0;
// Collects backward-traced input gradient vars for `name`. drop_empty_grad
// is kept (with its default) for interface compatibility but is not read
// here, hence UNUSED.
TracedVarList<VarBase, TracedVarRole::kBackward> InputGrad(
    const std::string& name, bool drop_empty_grad UNUSED = true) const {
  return GetVarBaseList<TracedVarRole::kBackward>(name, /*is_input=*/true);
}
......
......@@ -118,7 +118,8 @@ class OpBase {
attrs_[name] = v;
}
void SetBlockAttr(const std::string& name, framework::BlockDesc* block) {
void SetBlockAttr(const std::string& name UNUSED,
framework::BlockDesc* block UNUSED) {
PADDLE_THROW(platform::errors::PermissionDenied(
"SetBlockAttr is not support in dygraph OpBase"));
}
......
......@@ -70,7 +70,7 @@ struct Transform {
template <>
struct Transform<phi::CPUContext> {
template <typename InputIter, typename OutputIter, typename UnaryOperation>
void operator()(const phi::CPUContext& context,
void operator()(const phi::CPUContext& context UNUSED,
InputIter first,
InputIter last,
OutputIter result,
......
......@@ -254,7 +254,7 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
// Terminal helper: all context arguments have been extracted, so the
// InferMetaContext itself is no longer needed (UNUSED) and the collected
// args are forwarded straight to the infer-meta function.
template <typename T>
struct InferMetaFnCallHelper<InferMetaTypeTag<T>> {
  template <int in_idx, int attr_idx, int out_idx>
  static void Call(InferMetaContext* ctx UNUSED, Args&... args) {
    return infer_meta_fn(args...);
  }
};
......
......@@ -224,7 +224,7 @@ class TransformFunctor {
trans(ctx_, x_, x_ + nx_, y_, z_, func_);
}
inline void RunRowWise(int n, int pre) const {
inline void RunRowWise(int n) const {
phi::Transform<DeviceContext> trans;
if (is_xsize_larger_) {
trans(ctx_,
......@@ -243,7 +243,7 @@ class TransformFunctor {
}
}
inline void RunMidWise(int n, int pre, int post) const {
inline void RunMidWise(int n, int post) const {
phi::Transform<DeviceContext> trans;
if (is_xsize_larger_) {
trans(ctx_,
......@@ -435,10 +435,10 @@ void ElementwiseCompute(const CPUContext &dev_ctx,
}
if (post == 1) {
functor.RunRowWise(n, pre);
functor.RunRowWise(n);
return;
} else {
functor.RunMidWise(n, pre, post);
functor.RunMidWise(n, post);
return;
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册