Unverified commit f9e5072b, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53319)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent cd88156a
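Every hunk below applies the same fix: parameters that a default or stub implementation deliberately ignores are annotated with the UNUSED macro pulled in from paddle/phi/core/macros.h, which silences -Wunused-parameter without renaming or removing the parameters. As a minimal sketch of the idea (the exact definition in macros.h may differ in detail, and the Handler type here is a hypothetical example, not Paddle code):

// Assumption: a typical definition of the UNUSED annotation; the real one
// lives in paddle/phi/core/macros.h and may differ.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Hypothetical example: a default virtual implementation that ignores its
// arguments now compiles cleanly under -Wall -Wextra.
struct Handler {
  virtual ~Handler() = default;
  virtual int OnMessage(int msg_type UNUSED, const char* msg UNUSED) {
    return 0;  // default no-op; overrides are free to use the parameters
  }
};

Dropping the parameter names in the stubs would silence the warning too; keeping the names plus UNUSED preserves self-documenting signatures across the many virtual methods touched here.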
@@ -31,6 +31,7 @@
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/phi/core/macros.h"
 
 namespace google {
 namespace protobuf {
@@ -96,7 +97,9 @@ class PSServer {
   virtual int32_t StartS2S() { return 0; }
 
   virtual ::std::future<int32_t> SendPServer2PServerMsg(
-      int msg_type, int to_pserver_id, const std::string &msg) {
+      int msg_type UNUSED,
+      int to_pserver_id UNUSED,
+      const std::string &msg UNUSED) {
     LOG(FATAL) << "NotImplementError: PSServer::send_pserver2pserver_msg";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -124,9 +127,9 @@ class PSServer {
     }
     return itr->second(msg_type, from_pserver_id, msg);
   }
-  virtual int32_t ReceiveFromPServer(int msg_type,
-                                     int pserver_id,
-                                     const std::string &msg) {
+  virtual int32_t ReceiveFromPServer(int msg_type UNUSED,
+                                     int pserver_id UNUSED,
+                                     const std::string &msg UNUSED) {
     LOG(FATAL) << "NotImplementError::PSServer::ReceiveFromPServer";
     return -1;
   }
...
@@ -70,18 +70,20 @@ class BarrierTable : public Table {
   BarrierTable() {}
   virtual ~BarrierTable() {}
 
-  virtual void *GetShard(size_t shard_idx) { return 0; }
-  virtual int32_t Pull(TableContext &context) { return 0; }  // NOLINT
-  virtual int32_t Push(TableContext &context) { return 0; }  // NOLINT
-  int32_t Shrink(const std::string &param) override { return 0; }
+  virtual void *GetShard(size_t shard_idx UNUSED) { return 0; }
+  virtual int32_t Pull(TableContext &context UNUSED) { return 0; }  // NOLINT
+  virtual int32_t Push(TableContext &context UNUSED) { return 0; }  // NOLINT
+  int32_t Shrink(const std::string &param UNUSED) override { return 0; }
   virtual void Clear() {}
   virtual int32_t Flush() { return 0; }
 
-  virtual int32_t Load(const std::string &path, const std::string &param) {
+  virtual int32_t Load(const std::string &path UNUSED,
+                       const std::string &param UNUSED) {
     return 0;
   }
-  virtual int32_t Save(const std::string &path, const std::string &param) {
+  virtual int32_t Save(const std::string &path UNUSED,
+                       const std::string &param UNUSED) {
     return 0;
   }
   virtual int32_t InitializeShard() { return 0; }
...
@@ -32,6 +32,7 @@
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/string/string_helper.h"
+#include "paddle/phi/core/macros.h"
 
 namespace paddle {
 namespace distributed {
@@ -77,22 +78,22 @@ class Table {
   virtual int32_t Push(TableContext &context) = 0;  // NOLINT
 
   // only for barrier
-  virtual int32_t Barrier(const uint32_t trainer_id,
-                          const std::string barrier_type) {
+  virtual int32_t Barrier(const uint32_t trainer_id UNUSED,
+                          const std::string barrier_type UNUSED) {
     return 0;
   }
 
   // only for barrier table
   virtual int32_t SetTableMap(
-      std::unordered_map<uint32_t, std::shared_ptr<Table>> *table_map) {
+      std::unordered_map<uint32_t, std::shared_ptr<Table>> *table_map UNUSED) {
     return 0;
   }
 
   // only for tensor table
   virtual int32_t SetProgramEnv(
-      framework::Scope *scope,
-      platform::Place place,
-      const std::vector<framework::ProgramDesc> *sub_program) {
+      framework::Scope *scope UNUSED,
+      platform::Place place UNUSED,
+      const std::vector<framework::ProgramDesc> *sub_program UNUSED) {
     return 0;
   }
@@ -115,23 +116,23 @@ class Table {
                        const std::string &converter) = 0;
 
   // for cache
   virtual int32_t SaveCache(
-      const std::string &path,
-      const std::string &param,
+      const std::string &path UNUSED,
+      const std::string &param UNUSED,
       paddle::framework::Channel<std::pair<uint64_t, std::string>>
-          &shuffled_channel) {
+          &shuffled_channel UNUSED) {
     return 0;
   }
 
   virtual int64_t CacheShuffle(
-      const std::string &path,
-      const std::string &param,
-      double cache_threshold,
+      const std::string &path UNUSED,
+      const std::string &param UNUSED,
+      double cache_threshold UNUSED,
       std::function<std::future<int32_t>(
           int msg_type, int to_pserver_id, std::string &msg)>  // NOLINT
-          send_msg_func,
+          send_msg_func UNUSED,
       paddle::framework::Channel<std::pair<uint64_t, std::string>>
-          &shuffled_channel,
-      const std::vector<Table *> &table_ptrs) {
+          &shuffled_channel UNUSED,
+      const std::vector<Table *> &table_ptrs UNUSED) {
     return 0;
   }
@@ -149,7 +150,7 @@ class Table {
   virtual void *GetShard(size_t shard_idx) = 0;
   virtual std::pair<int64_t, int64_t> PrintTableStat() { return {0, 0}; }
-  virtual int32_t CacheTable(uint16_t pass_id) { return 0; }
+  virtual int32_t CacheTable(uint16_t pass_id UNUSED) { return 0; }
 
   // for patch model
   virtual void Revert() {}
...
@@ -402,22 +402,22 @@ class CustomParser {
   virtual void Init(const std::vector<SlotConf>& slots) = 0;
   virtual bool Init(const std::vector<AllSlotInfo>& slots) = 0;
   virtual void ParseOneInstance(const char* str, Record* instance) = 0;
-  virtual int ParseInstance(int len,
-                            const char* str,
-                            std::vector<Record>* instances) {
+  virtual int ParseInstance(int len UNUSED,
+                            const char* str UNUSED,
+                            std::vector<Record>* instances UNUSED) {
     return 0;
   }
   virtual bool ParseOneInstance(
-      const std::string& line,
-      std::function<void(std::vector<SlotRecord>&, int)>
-          GetInsFunc) {  // NOLINT
+      const std::string& line UNUSED,
+      std::function<void(std::vector<SlotRecord>&, int)> GetInsFunc
+          UNUSED) {  // NOLINT
     return true;
   }
   virtual bool ParseFileInstance(
-      std::function<int(char* buf, int len)> ReadBuffFunc,
-      std::function<void(std::vector<SlotRecord>&, int, int)>
-          PullRecordsFunc,  // NOLINT
-      int& lines) {  // NOLINT
+      std::function<int(char* buf, int len)> ReadBuffFunc UNUSED,
+      std::function<void(std::vector<SlotRecord>&, int, int)> PullRecordsFunc
+          UNUSED,  // NOLINT
+      int& lines UNUSED) {  // NOLINT
     return false;
   }
 };
@@ -1267,7 +1267,8 @@ class DataFeed {
   virtual void SetInsIdVec(MiniBatchGpuPack* pack) {}
 #endif
 
-  virtual void DumpWalkPath(std::string dump_path, size_t dump_rate) {
+  virtual void DumpWalkPath(std::string dump_path UNUSED,
+                            size_t dump_rate UNUSED) {
     PADDLE_THROW(platform::errors::Unimplemented(
         "This function(DumpWalkPath) is not implemented."));
   }
...
@@ -25,6 +25,7 @@
 #include <unordered_set>
 #include <utility>
 #include <vector>
+#include "paddle/phi/core/macros.h"
 
 #ifdef PADDLE_WITH_GLOO
 #include <gloo/broadcast.h>
@@ -51,12 +52,12 @@ class Dataset {
   Dataset() {}
   virtual ~Dataset() {}
   // do sample
-  virtual void TDMSample(const std::string tree_name,
-                         const std::string tree_path,
-                         const std::vector<uint16_t> tdm_layer_counts,
-                         const uint16_t start_sample_layer,
-                         const bool with_hierachy,
-                         const uint16_t seed_,
+  virtual void TDMSample(const std::string tree_name UNUSED,
+                         const std::string tree_path UNUSED,
+                         const std::vector<uint16_t> tdm_layer_counts UNUSED,
+                         const uint16_t start_sample_layer UNUSED,
+                         const bool with_hierachy UNUSED,
+                         const uint16_t seed_ UNUSED,
                          const uint16_t sample_slot) {}
   // set file list
   virtual void SetFileList(const std::vector<std::string>& filelist) = 0;
@@ -238,8 +239,9 @@ class DatasetImpl : public Dataset {
   virtual void WaitPreLoadDone();
   virtual void ReleaseMemory();
   virtual void LocalShuffle();
-  virtual void GlobalShuffle(int thread_num = -1) {}
-  virtual void SlotsShuffle(const std::set<std::string>& slots_to_replace) {}
+  virtual void GlobalShuffle(int thread_num UNUSED = -1) {}
+  virtual void SlotsShuffle(
+      const std::set<std::string>& slots_to_replace UNUSED) {}
   virtual const std::vector<T>& GetSlotsOriginalData() {
     return slots_shuffle_original_data_;
   }
@@ -251,12 +253,12 @@ class DatasetImpl : public Dataset {
   virtual void MergeByInsId() {}
   virtual void PreprocessInstance() {}
   virtual void PostprocessInstance() {}
-  virtual void SetCurrentPhase(int current_phase) {}
-  virtual void GenerateLocalTablesUnlock(int table_id,
-                                         int feadim,
-                                         int read_thread_num,
-                                         int consume_thread_num,
-                                         int shard_num) {}
+  virtual void SetCurrentPhase(int current_phase UNUSED) {}
+  virtual void GenerateLocalTablesUnlock(int table_id UNUSED,
+                                         int feadim UNUSED,
+                                         int read_thread_num UNUSED,
+                                         int consume_thread_num UNUSED,
+                                         int shard_num UNUSED) {}
   virtual void ClearLocalTables() {}
   virtual void CreatePreLoadReaders();
   virtual void DestroyPreLoadReaders();
@@ -288,9 +290,9 @@ class DatasetImpl : public Dataset {
   virtual uint32_t GetPassID() { return pass_id_; }
 
  protected:
-  virtual int ReceiveFromClient(int msg_type,
-                                int client_id,
-                                const std::string& msg) {
+  virtual int ReceiveFromClient(int msg_type UNUSED,
+                                int client_id UNUSED,
+                                const std::string& msg UNUSED) {
     // TODO(yaoxuefeng) for SlotRecordDataset
     return -1;
   }
...
@@ -44,6 +44,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/timer.h"
 #include "paddle/phi/backends/dynload/port.h"
+#include "paddle/phi/core/macros.h"
 
 namespace paddle {
 namespace framework {
@@ -179,14 +180,14 @@ class DeviceWorker {
   virtual void BindingDataFeedMemory() = 0;
   virtual void SetRootScope(Scope* root_scope);
   virtual void SetDataFeed(DataFeed* data_feed);
-  virtual void SetWorkerNum(int num) {}
-  virtual void CacheProgram(const ProgramDesc& main_program) {}
+  virtual void SetWorkerNum(int num UNUSED) {}
+  virtual void CacheProgram(const ProgramDesc& main_program UNUSED) {}
   virtual void ProduceTasks() {}
   virtual void GetXpuOpIndex() {}
-  virtual void Schedule(int taskid) {}
+  virtual void Schedule(int taskid UNUSED) {}
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-  virtual void SetStream(const gpuStream_t stream) {}
-  virtual void SetEvent(const gpuEvent_t event) {}
+  virtual void SetStream(const gpuStream_t stream UNUSED) {}
+  virtual void SetEvent(const gpuEvent_t event UNUSED) {}
 #endif
   virtual void SetNeedDumpField(bool need_dump_field) {
     need_dump_field_ = need_dump_field;
@@ -256,7 +257,7 @@ class CPUWorkerBase : public DeviceWorker {
   virtual void TrainFiles() = 0;
   virtual void TrainFilesWithProfiler() {}
   virtual void PrintFetchVars() {}
-  virtual void CreateDeviceResource(const ProgramDesc& main_prog) {}
+  virtual void CreateDeviceResource(const ProgramDesc& main_prog UNUSED) {}
 
  protected:
   int thread_id_;
@@ -684,7 +685,7 @@ class SectionWorker : public DeviceWorker {
   void PrepareUnusedVar();
 
   void BindingDataFeedMemory() override {}
-  void CreateDeviceResource(const ProgramDesc& main_prog) override{};
+  void CreateDeviceResource(const ProgramDesc& main_prog UNUSED) override{};
 
   void TrainFiles() override;
   void TrainFilesWithProfiler() override{};
@@ -693,7 +694,7 @@ class SectionWorker : public DeviceWorker {
   const platform::Place& place() const { return place_; }
 
-  void SetDeviceIndex(int tid) override {}
+  void SetDeviceIndex(int tid UNUSED) override {}
   void SetThreadIndex(int thread_id) { thread_id_ = thread_id; }
   void SetMicrobatchNum(int num) { num_microbatches_ = num; }
   void SetPipelineStageNum(int num) { num_pipeline_stages_ = num; }
@@ -755,7 +756,7 @@ class HeterSectionWorker : public DeviceWorker {
   ~HeterSectionWorker() override {}
 
   void Initialize(const TrainerDesc& desc) override;
-  void CreateDeviceResource(const ProgramDesc& main_prog) override{};
+  void CreateDeviceResource(const ProgramDesc& main_prog UNUSED) override{};
 
   void TrainFiles() override;
   void TrainFilesWithProfiler() override;
...
@@ -23,6 +23,7 @@
 #include "paddle/fluid/framework/type_defs.h"
 #include "paddle/fluid/imperative/type_defs.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/macros.h"
 
 namespace paddle {
 namespace framework {
@@ -83,19 +84,19 @@ class NoNeedBufferVarsInference {
   }
 };
 
 #define DECLARE_NO_NEED_BUFFER_VARS_INFERER(class_type, ...)           \
   class class_type final                                               \
       : public ::paddle::framework::NoNeedBufferVarsInference {        \
    public:                                                             \
     using ::paddle::framework::NoNeedBufferVarsInference::             \
         NoNeedBufferVarsInference;                                     \
                                                                        \
     const std::unordered_set<std::string> &operator()(                 \
-        const ::paddle::framework::InferNoNeedBufferVarsContext &ctx)  \
-        const final {                                                  \
+        const ::paddle::framework::InferNoNeedBufferVarsContext &ctx   \
+            UNUSED) const final {                                      \
       static std::unordered_set<std::string> __ret__{__VA_ARGS__};     \
       return __ret__;                                                  \
     }                                                                  \
   }
 
 class InferNoNeedBufferVarsFN {
...
@@ -148,7 +148,7 @@ class OpRegistry {
 };
 
 template <typename PlaceType>
-inline void CheckKernelLaunch(const char* op_type) {}
+inline void CheckKernelLaunch(const char* op_type UNUSED) {}
 
 #ifdef PADDLE_WITH_CUDA
 template <>
...
@@ -1365,7 +1365,7 @@ struct KernelRegistrar {
     return 0;                                                                \
   }                                                                          \
   void __PD_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout(         \
-      const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
+      const ::phi::KernelKey& kernel_key UNUSED, ::phi::Kernel* kernel UNUSED)
 #else
 #define __PD_REGISTER_KERNEL_FOR_ALL_DTYPE(                                  \
     reg_type, kernel_name, backend, layout, kernel_fn)                       \
...
@@ -12,17 +12,17 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/phi/kernels/empty_kernel.h"
 
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/common/complex.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/macros.h"
 
 namespace phi {
 
 template <typename T, typename Context>
 void EmptyKernel(const Context& dev_ctx,
                  const IntArray& shape,
-                 DataType dtype,
+                 DataType dtype UNUSED,
                  DenseTensor* out) {
   out->Resize(phi::make_ddim(shape.GetData()));
   dev_ctx.template Alloc<T>(out);
@@ -31,7 +31,7 @@ void EmptyKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void EmptyLikeKernel(const Context& dev_ctx,
                      const DenseTensor& x,
-                     DataType dtype,
+                     DataType dtype UNUSED,
                      DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
 }
...
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <string>
 #include <unordered_set>
 #include <vector>
+#include "paddle/phi/core/macros.h"
 
 namespace phi {
 namespace funcs {
@@ -33,7 +33,7 @@ struct BinaryCompoundFunctor {
     return func1_(x, intermediat_out);
   }
 
-  inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return func2_(y); }
+  inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) { return func2_(y); }
 
   BinaryFunctor func1_;
   UnaryFunctor func2_;
@@ -64,16 +64,18 @@ struct BinaryCompoundGradDxFunctor {
                               const UnaryFun &unary_fun)
       : d_binary_fun_(d_binary_fun), unary_fun_(unary_fun) {}
 
-  inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+  inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
     return dout * d_binary_fun_.Dx(x, unary_fun_(y));
   }
 
-  inline HOSTDEVICE T
-  UseIntermediateOut(T x, T y, T intermediate_out, T out, T dout) {
+  inline HOSTDEVICE T UseIntermediateOut(
+      T x, T y UNUSED, T intermediate_out, T out UNUSED, T dout) {
     return dout * d_binary_fun_.Dx(x, intermediate_out);
   }
 
-  inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return unary_fun_(y); }
+  inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) {
+    return unary_fun_(y);
+  }
 
  private:
   DBinaryFun d_binary_fun_;
@@ -210,13 +212,13 @@ struct BinaryCompoundGradDIntermedaiteOutFunctor {
                                          const UnaryFun &unary_fun)
       : d_binary_fun_(d_binary_fun), unary_fun_(unary_fun) {}
 
-  inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+  inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
     return dout * d_binary_fun_.Dy(x, unary_fun_(y));
   }
 
   inline HOSTDEVICE T UseIntermediateOut(T x,
                                          T intermediate_out,
-                                         T out,
+                                         T out UNUSED,
                                          T dout) {
     return dout * d_binary_fun_.Dy(x, intermediate_out);
   }
...
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/hostdevice.h"
+#include "paddle/phi/core/macros.h"
 
 #if defined(__xpu__)
 #include <xpu/runtime.h>
@@ -259,7 +260,7 @@ template <>
 struct FMaxGradDx<dtype::float16> {
   HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
                                        dtype::float16 y,
-                                       dtype::float16 out,
+                                       dtype::float16 out UNUSED,
                                        dtype::float16 dout) const {
     return dout * static_cast<dtype::float16>((x >= y) || dtype::isnan(y));
   }
@@ -267,7 +268,7 @@ struct FMaxGradDx<dtype::float16> {
 template <>
 struct FMaxGradDx<int> {
-  HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+  HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
     return dout * static_cast<int>((x >= y));
   }
 };
@@ -276,7 +277,7 @@ template <>
 struct FMaxGradDx<int64_t> {
   HOSTDEVICE int64_t operator()(int64_t x,
                                 int64_t y,
-                                int64_t out,
+                                int64_t out UNUSED,
                                 int64_t dout) const {
     return dout * static_cast<int64_t>((x >= y));
   }
@@ -293,7 +294,7 @@ template <>
 struct FMaxGradDy<dtype::float16> {
   HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
                                        dtype::float16 y,
-                                       dtype::float16 out,
+                                       dtype::float16 out UNUSED,
                                        dtype::float16 dout) const {
     return dout * static_cast<dtype::float16>(!((x >= y) || dtype::isnan(y)));
   }
@@ -303,7 +304,7 @@ template <>
 struct FMaxGradDy<int64_t> {
   HOSTDEVICE int64_t operator()(int64_t x,
                                 int64_t y,
-                                int64_t out,
+                                int64_t out UNUSED,
                                 int64_t dout) const {
     return dout * static_cast<int64_t>(!((x >= y)));
   }
@@ -311,7 +312,7 @@ struct FMaxGradDy<int64_t> {
 template <>
 struct FMaxGradDy<int> {
-  HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+  HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
     return dout * static_cast<int>(!((x >= y)));
   }
 };
@@ -327,7 +328,7 @@ template <>
 struct FMinGradDx<dtype::float16> {
   HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
                                        dtype::float16 y,
-                                       dtype::float16 out,
+                                       dtype::float16 out UNUSED,
                                        dtype::float16 dout) const {
     return dout * static_cast<dtype::float16>((x <= y) || dtype::isnan(y));
   }
@@ -335,7 +336,7 @@ struct FMinGradDx<dtype::float16> {
 template <>
 struct FMinGradDx<int> {
-  HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+  HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
     return dout * static_cast<int>((x <= y));
   }
 };
@@ -344,7 +345,7 @@ template <>
 struct FMinGradDx<int64_t> {
   HOSTDEVICE int64_t operator()(int64_t x,
                                 int64_t y,
-                                int64_t out,
+                                int64_t out UNUSED,
                                 int64_t dout) const {
     return dout * static_cast<int64_t>((x <= y));
   }
@@ -361,7 +362,7 @@ template <>
 struct FMinGradDy<dtype::float16> {
   HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
                                        dtype::float16 y,
-                                       dtype::float16 out,
+                                       dtype::float16 out UNUSED,
                                        dtype::float16 dout) const {
     return dout * static_cast<dtype::float16>(!((x <= y) || dtype::isnan(y)));
   }
@@ -369,7 +370,7 @@ struct FMinGradDy<dtype::float16> {
 template <>
 struct FMinGradDy<int> {
-  HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+  HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
     return dout * static_cast<int>(!((x <= y)));
   }
 };
@@ -378,7 +379,7 @@ template <>
 struct FMinGradDy<int64_t> {
   HOSTDEVICE int64_t operator()(int64_t x,
                                 int64_t y,
-                                int64_t out,
+                                int64_t out UNUSED,
                                 int64_t dout) const {
     return dout * static_cast<int64_t>(!((x <= y)));
   }
...
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
+#include "paddle/phi/core/macros.h"
 
 namespace phi {
 namespace funcs {
@@ -29,7 +29,8 @@ struct ForRange {
 template <>
 struct ForRange<phi::CPUContext> {
-  ForRange(const phi::CPUContext& dev_ctx, size_t limit) : limit_(limit) {}
+  ForRange(const phi::CPUContext& dev_ctx UNUSED, size_t limit)
+      : limit_(limit) {}
 
   template <typename Function>
   void operator()(Function func) const {
...
@@ -14,9 +14,9 @@
 #pragma once
 
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 
 namespace phi {
 namespace funcs {
@@ -130,12 +130,12 @@ struct SumGradFunctor {
             typename DY,
             typename Dim>
   void operator()(const DeviceContext& place,
-                  X* x,
-                  Y* y,
+                  X* x UNUSED,
+                  Y* y UNUSED,
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     dx->device(place) = dy->broadcast(dim);
   }
 };
@@ -171,7 +171,7 @@ struct MaxOrMinGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     auto equals = (*x) == y->broadcast(dim);
     auto ones = dx->constant(1);
     auto zeros = dx->constant(0);
...
@@ -15,15 +15,15 @@
 #pragma once
 
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/meshgrid_grad_kernel.h"
 
 namespace phi {
 
 template <typename T, typename Context, int Rank>
 void MeshgridBackward(const Context& ctx,
-                      const std::vector<const DenseTensor*>& ins,
+                      const std::vector<const DenseTensor*>& ins UNUSED,
                       const std::vector<const DenseTensor*>& out_grad,
                       std::vector<DenseTensor*> outs) {
   int n = out_grad.size();
...
@@ -14,10 +14,10 @@
 #pragma once
 
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/kernels/cast_kernel.h"
 #include "paddle/phi/kernels/empty_kernel.h"
 #include "paddle/phi/kernels/funcs/reduce_grad_functions.h"
 
 namespace phi {
 
 template <typename Context,
@@ -27,11 +27,11 @@ template <typename Context,
           bool kNoNeedBufferY = false>
 void ComputeFromInput(const Context& dev_ctx,
                       const DenseTensor& x,
-                      const DenseTensor& out_grad,
+                      const DenseTensor& out_grad UNUSED,
                       const paddle::optional<DenseTensor>& out,
                       const DenseTensor& input2,
                       const std::vector<int64_t>& dims,
-                      bool keep_dim,
+                      bool keep_dim UNUSED,
                       bool reduce_all,
                       DenseTensor* x_grad) {
   reduce_all = recompute_reduce_all(x, dims, reduce_all);
...
@@ -54,7 +54,7 @@ void SetValueGradImpl(const Context& dev_ctx,
                       const IntArray& steps,
                       const std::vector<int64_t>& axes,
                       const std::vector<int64_t>& decrease_axes,
-                      const std::vector<int64_t>& none_axes,
+                      const std::vector<int64_t>& none_axes UNUSED,
                       DenseTensor* x_grad,
                       DenseTensor* value_grad) {
   PADDLE_ENFORCE_EQ(
...
@@ -17,8 +17,8 @@
 #include "paddle/phi/backends/onednn/onednn_helper.h"
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/phi/core/expect.h"
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
 
 namespace phi {
 namespace onednn {
@@ -50,7 +50,7 @@ class ConvOneDNNHandlerT
                     const std::string& padding_algorithm,
                     const std::vector<int>& dilations_in,
                     int groups,
-                    const std::string& data_format,
+                    const std::string& data_format UNUSED,
                     bool is_test,
                     bool is_BFLOAT16,
                     const std::string& fuse_activation,
...