Unverified commit f9e5072b, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53319)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent cd88156a
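Background: -Wunused-parameter fires whenever a named parameter is never referenced in a function body, which is common in the default no-op implementations of the virtual hooks touched by this diff. The fix throughout is to tag such parameters with the UNUSED macro pulled in from paddle/phi/core/macros.h. A minimal sketch of the idea follows; the macro definition shown is an assumption for illustration (a typical GCC/Clang-style attribute), not copied from that header, and ExampleTable is a simplified stand-in rather than Paddle's real interface.

#include <cstdint>

// Hypothetical stand-in for the UNUSED macro; Paddle's real definition
// lives in paddle/phi/core/macros.h.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// A virtual hook whose default body ignores its arguments. Compiled with
// -Wall -Wextra, the un-annotated version triggers -Wunused-parameter;
// the UNUSED annotation silences the warning without changing behavior.
struct ExampleTable {
  virtual int32_t Barrier(uint32_t trainer_id UNUSED,
                          const char* barrier_type UNUSED) {
    return 0;  // default no-op; overriding classes may actually use the args
  }
  virtual ~ExampleTable() = default;
};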
......@@ -31,6 +31,7 @@
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/core/macros.h"
namespace google {
namespace protobuf {
......@@ -96,7 +97,9 @@ class PSServer {
virtual int32_t StartS2S() { return 0; }
virtual ::std::future<int32_t> SendPServer2PServerMsg(
-int msg_type, int to_pserver_id, const std::string &msg) {
+int msg_type UNUSED,
+int to_pserver_id UNUSED,
+const std::string &msg UNUSED) {
LOG(FATAL) << "NotImplementError: PSServer::send_pserver2pserver_msg";
std::promise<int32_t> promise;
std::future<int> fut = promise.get_future();
......@@ -124,9 +127,9 @@ class PSServer {
}
return itr->second(msg_type, from_pserver_id, msg);
}
-virtual int32_t ReceiveFromPServer(int msg_type,
-int pserver_id,
-const std::string &msg) {
+virtual int32_t ReceiveFromPServer(int msg_type UNUSED,
+int pserver_id UNUSED,
+const std::string &msg UNUSED) {
LOG(FATAL) << "NotImplementError::PSServer::ReceiveFromPServer";
return -1;
}
......
......@@ -70,18 +70,20 @@ class BarrierTable : public Table {
BarrierTable() {}
virtual ~BarrierTable() {}
-virtual void *GetShard(size_t shard_idx) { return 0; }
+virtual void *GetShard(size_t shard_idx UNUSED) { return 0; }
-virtual int32_t Pull(TableContext &context) { return 0; } // NOLINT
-virtual int32_t Push(TableContext &context) { return 0; } // NOLINT
+virtual int32_t Pull(TableContext &context UNUSED) { return 0; } // NOLINT
+virtual int32_t Push(TableContext &context UNUSED) { return 0; } // NOLINT
-int32_t Shrink(const std::string &param) override { return 0; }
+int32_t Shrink(const std::string &param UNUSED) override { return 0; }
virtual void Clear() {}
virtual int32_t Flush() { return 0; }
-virtual int32_t Load(const std::string &path, const std::string &param) {
+virtual int32_t Load(const std::string &path UNUSED,
+const std::string &param UNUSED) {
return 0;
}
-virtual int32_t Save(const std::string &path, const std::string &param) {
+virtual int32_t Save(const std::string &path UNUSED,
+const std::string &param UNUSED) {
return 0;
}
virtual int32_t InitializeShard() { return 0; }
......
......@@ -32,6 +32,7 @@
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace distributed {
......@@ -77,22 +78,22 @@ class Table {
virtual int32_t Push(TableContext &context) = 0; // NOLINT
// only for barrier
-virtual int32_t Barrier(const uint32_t trainer_id,
-const std::string barrier_type) {
+virtual int32_t Barrier(const uint32_t trainer_id UNUSED,
+const std::string barrier_type UNUSED) {
return 0;
}
// only for barrier table
virtual int32_t SetTableMap(
-std::unordered_map<uint32_t, std::shared_ptr<Table>> *table_map) {
+std::unordered_map<uint32_t, std::shared_ptr<Table>> *table_map UNUSED) {
return 0;
}
// only for tensor table
virtual int32_t SetProgramEnv(
-framework::Scope *scope,
-platform::Place place,
-const std::vector<framework::ProgramDesc> *sub_program) {
+framework::Scope *scope UNUSED,
+platform::Place place UNUSED,
+const std::vector<framework::ProgramDesc> *sub_program UNUSED) {
return 0;
}
......@@ -115,23 +116,23 @@ class Table {
const std::string &converter) = 0;
// for cache
virtual int32_t SaveCache(
-const std::string &path,
-const std::string &param,
+const std::string &path UNUSED,
+const std::string &param UNUSED,
paddle::framework::Channel<std::pair<uint64_t, std::string>>
-&shuffled_channel) {
+&shuffled_channel UNUSED) {
return 0;
}
virtual int64_t CacheShuffle(
-const std::string &path,
-const std::string &param,
-double cache_threshold,
+const std::string &path UNUSED,
+const std::string &param UNUSED,
+double cache_threshold UNUSED,
std::function<std::future<int32_t>(
int msg_type, int to_pserver_id, std::string &msg)> // NOLINT
-send_msg_func,
+send_msg_func UNUSED,
paddle::framework::Channel<std::pair<uint64_t, std::string>>
-&shuffled_channel,
-const std::vector<Table *> &table_ptrs) {
+&shuffled_channel UNUSED,
+const std::vector<Table *> &table_ptrs UNUSED) {
return 0;
}
......@@ -149,7 +150,7 @@ class Table {
virtual void *GetShard(size_t shard_idx) = 0;
virtual std::pair<int64_t, int64_t> PrintTableStat() { return {0, 0}; }
-virtual int32_t CacheTable(uint16_t pass_id) { return 0; }
+virtual int32_t CacheTable(uint16_t pass_id UNUSED) { return 0; }
// for patch model
virtual void Revert() {}
......
......@@ -402,22 +402,22 @@ class CustomParser {
virtual void Init(const std::vector<SlotConf>& slots) = 0;
virtual bool Init(const std::vector<AllSlotInfo>& slots) = 0;
virtual void ParseOneInstance(const char* str, Record* instance) = 0;
-virtual int ParseInstance(int len,
-const char* str,
-std::vector<Record>* instances) {
+virtual int ParseInstance(int len UNUSED,
+const char* str UNUSED,
+std::vector<Record>* instances UNUSED) {
return 0;
}
virtual bool ParseOneInstance(
-const std::string& line,
-std::function<void(std::vector<SlotRecord>&, int)>
-GetInsFunc) { // NOLINT
+const std::string& line UNUSED,
+std::function<void(std::vector<SlotRecord>&, int)> GetInsFunc
+UNUSED) { // NOLINT
return true;
}
virtual bool ParseFileInstance(
-std::function<int(char* buf, int len)> ReadBuffFunc,
-std::function<void(std::vector<SlotRecord>&, int, int)>
-PullRecordsFunc, // NOLINT
-int& lines) { // NOLINT
+std::function<int(char* buf, int len)> ReadBuffFunc UNUSED,
+std::function<void(std::vector<SlotRecord>&, int, int)> PullRecordsFunc
+UNUSED, // NOLINT
+int& lines UNUSED) { // NOLINT
return false;
}
};
......@@ -1267,7 +1267,8 @@ class DataFeed {
virtual void SetInsIdVec(MiniBatchGpuPack* pack) {}
#endif
-virtual void DumpWalkPath(std::string dump_path, size_t dump_rate) {
+virtual void DumpWalkPath(std::string dump_path UNUSED,
+size_t dump_rate UNUSED) {
PADDLE_THROW(platform::errors::Unimplemented(
"This function(DumpWalkPath) is not implemented."));
}
......
......@@ -25,6 +25,7 @@
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/phi/core/macros.h"
#ifdef PADDLE_WITH_GLOO
#include <gloo/broadcast.h>
......@@ -51,12 +52,12 @@ class Dataset {
Dataset() {}
virtual ~Dataset() {}
// do sample
-virtual void TDMSample(const std::string tree_name,
-const std::string tree_path,
-const std::vector<uint16_t> tdm_layer_counts,
-const uint16_t start_sample_layer,
-const bool with_hierachy,
-const uint16_t seed_,
+virtual void TDMSample(const std::string tree_name UNUSED,
+const std::string tree_path UNUSED,
+const std::vector<uint16_t> tdm_layer_counts UNUSED,
+const uint16_t start_sample_layer UNUSED,
+const bool with_hierachy UNUSED,
+const uint16_t seed_ UNUSED,
const uint16_t sample_slot) {}
// set file list
virtual void SetFileList(const std::vector<std::string>& filelist) = 0;
......@@ -238,8 +239,9 @@ class DatasetImpl : public Dataset {
virtual void WaitPreLoadDone();
virtual void ReleaseMemory();
virtual void LocalShuffle();
-virtual void GlobalShuffle(int thread_num = -1) {}
-virtual void SlotsShuffle(const std::set<std::string>& slots_to_replace) {}
+virtual void GlobalShuffle(int thread_num UNUSED = -1) {}
+virtual void SlotsShuffle(
+const std::set<std::string>& slots_to_replace UNUSED) {}
virtual const std::vector<T>& GetSlotsOriginalData() {
return slots_shuffle_original_data_;
}
......@@ -251,12 +253,12 @@ class DatasetImpl : public Dataset {
virtual void MergeByInsId() {}
virtual void PreprocessInstance() {}
virtual void PostprocessInstance() {}
-virtual void SetCurrentPhase(int current_phase) {}
-virtual void GenerateLocalTablesUnlock(int table_id,
-int feadim,
-int read_thread_num,
-int consume_thread_num,
-int shard_num) {}
+virtual void SetCurrentPhase(int current_phase UNUSED) {}
+virtual void GenerateLocalTablesUnlock(int table_id UNUSED,
+int feadim UNUSED,
+int read_thread_num UNUSED,
+int consume_thread_num UNUSED,
+int shard_num UNUSED) {}
virtual void ClearLocalTables() {}
virtual void CreatePreLoadReaders();
virtual void DestroyPreLoadReaders();
......@@ -288,9 +290,9 @@ class DatasetImpl : public Dataset {
virtual uint32_t GetPassID() { return pass_id_; }
protected:
-virtual int ReceiveFromClient(int msg_type,
-int client_id,
-const std::string& msg) {
+virtual int ReceiveFromClient(int msg_type UNUSED,
+int client_id UNUSED,
+const std::string& msg UNUSED) {
// TODO(yaoxuefeng) for SlotRecordDataset
return -1;
}
......
......@@ -44,6 +44,7 @@ limitations under the License. */
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/timer.h"
#include "paddle/phi/backends/dynload/port.h"
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
......@@ -179,14 +180,14 @@ class DeviceWorker {
virtual void BindingDataFeedMemory() = 0;
virtual void SetRootScope(Scope* root_scope);
virtual void SetDataFeed(DataFeed* data_feed);
-virtual void SetWorkerNum(int num) {}
-virtual void CacheProgram(const ProgramDesc& main_program) {}
+virtual void SetWorkerNum(int num UNUSED) {}
+virtual void CacheProgram(const ProgramDesc& main_program UNUSED) {}
virtual void ProduceTasks() {}
virtual void GetXpuOpIndex() {}
-virtual void Schedule(int taskid) {}
+virtual void Schedule(int taskid UNUSED) {}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-virtual void SetStream(const gpuStream_t stream) {}
-virtual void SetEvent(const gpuEvent_t event) {}
+virtual void SetStream(const gpuStream_t stream UNUSED) {}
+virtual void SetEvent(const gpuEvent_t event UNUSED) {}
#endif
virtual void SetNeedDumpField(bool need_dump_field) {
need_dump_field_ = need_dump_field;
......@@ -256,7 +257,7 @@ class CPUWorkerBase : public DeviceWorker {
virtual void TrainFiles() = 0;
virtual void TrainFilesWithProfiler() {}
virtual void PrintFetchVars() {}
-virtual void CreateDeviceResource(const ProgramDesc& main_prog) {}
+virtual void CreateDeviceResource(const ProgramDesc& main_prog UNUSED) {}
protected:
int thread_id_;
......@@ -684,7 +685,7 @@ class SectionWorker : public DeviceWorker {
void PrepareUnusedVar();
void BindingDataFeedMemory() override {}
-void CreateDeviceResource(const ProgramDesc& main_prog) override{};
+void CreateDeviceResource(const ProgramDesc& main_prog UNUSED) override{};
void TrainFiles() override;
void TrainFilesWithProfiler() override{};
......@@ -693,7 +694,7 @@ class SectionWorker : public DeviceWorker {
const platform::Place& place() const { return place_; }
-void SetDeviceIndex(int tid) override {}
+void SetDeviceIndex(int tid UNUSED) override {}
void SetThreadIndex(int thread_id) { thread_id_ = thread_id; }
void SetMicrobatchNum(int num) { num_microbatches_ = num; }
void SetPipelineStageNum(int num) { num_pipeline_stages_ = num; }
......@@ -755,7 +756,7 @@ class HeterSectionWorker : public DeviceWorker {
~HeterSectionWorker() override {}
void Initialize(const TrainerDesc& desc) override;
-void CreateDeviceResource(const ProgramDesc& main_prog) override{};
+void CreateDeviceResource(const ProgramDesc& main_prog UNUSED) override{};
void TrainFiles() override;
void TrainFilesWithProfiler() override;
......
......@@ -23,6 +23,7 @@
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/core/macros.h"
namespace paddle {
namespace framework {
......@@ -83,19 +84,19 @@ class NoNeedBufferVarsInference {
}
};
-#define DECLARE_NO_NEED_BUFFER_VARS_INFERER(class_type, ...) \
-class class_type final \
-: public ::paddle::framework::NoNeedBufferVarsInference { \
-public: \
-using ::paddle::framework::NoNeedBufferVarsInference:: \
-NoNeedBufferVarsInference; \
-\
-const std::unordered_set<std::string> &operator()( \
-const ::paddle::framework::InferNoNeedBufferVarsContext &ctx) \
-const final { \
-static std::unordered_set<std::string> __ret__{__VA_ARGS__}; \
-return __ret__; \
-} \
+#define DECLARE_NO_NEED_BUFFER_VARS_INFERER(class_type, ...) \
+class class_type final \
+: public ::paddle::framework::NoNeedBufferVarsInference { \
+public: \
+using ::paddle::framework::NoNeedBufferVarsInference:: \
+NoNeedBufferVarsInference; \
+\
+const std::unordered_set<std::string> &operator()( \
+const ::paddle::framework::InferNoNeedBufferVarsContext &ctx \
+UNUSED) const final { \
+static std::unordered_set<std::string> __ret__{__VA_ARGS__}; \
+return __ret__; \
+} \
}
class InferNoNeedBufferVarsFN {
......
......@@ -148,7 +148,7 @@ class OpRegistry {
};
template <typename PlaceType>
-inline void CheckKernelLaunch(const char* op_type) {}
+inline void CheckKernelLaunch(const char* op_type UNUSED) {}
#ifdef PADDLE_WITH_CUDA
template <>
......
......@@ -1365,7 +1365,7 @@ struct KernelRegistrar {
return 0; \
} \
void __PD_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( \
-const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel)
+const ::phi::KernelKey& kernel_key UNUSED, ::phi::Kernel* kernel UNUSED)
#else
#define __PD_REGISTER_KERNEL_FOR_ALL_DTYPE( \
reg_type, kernel_name, backend, layout, kernel_fn) \
......
......@@ -12,17 +12,17 @@
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/macros.h"
namespace phi {
template <typename T, typename Context>
void EmptyKernel(const Context& dev_ctx,
const IntArray& shape,
-DataType dtype,
+DataType dtype UNUSED,
DenseTensor* out) {
out->Resize(phi::make_ddim(shape.GetData()));
dev_ctx.template Alloc<T>(out);
......@@ -31,7 +31,7 @@ void EmptyKernel(const Context& dev_ctx,
template <typename T, typename Context>
void EmptyLikeKernel(const Context& dev_ctx,
const DenseTensor& x,
-DataType dtype,
+DataType dtype UNUSED,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
}
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/phi/core/macros.h"
namespace phi {
namespace funcs {
......@@ -33,7 +33,7 @@ struct BinaryCompoundFunctor {
return func1_(x, intermediat_out);
}
-inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return func2_(y); }
+inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) { return func2_(y); }
BinaryFunctor func1_;
UnaryFunctor func2_;
......@@ -64,16 +64,18 @@ struct BinaryCompoundGradDxFunctor {
const UnaryFun &unary_fun)
: d_binary_fun_(d_binary_fun), unary_fun_(unary_fun) {}
-inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
return dout * d_binary_fun_.Dx(x, unary_fun_(y));
}
-inline HOSTDEVICE T
-UseIntermediateOut(T x, T y, T intermediate_out, T out, T dout) {
+inline HOSTDEVICE T UseIntermediateOut(
+T x, T y UNUSED, T intermediate_out, T out UNUSED, T dout) {
return dout * d_binary_fun_.Dx(x, intermediate_out);
}
-inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return unary_fun_(y); }
+inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) {
+return unary_fun_(y);
+}
private:
DBinaryFun d_binary_fun_;
......@@ -210,13 +212,13 @@ struct BinaryCompoundGradDIntermedaiteOutFunctor {
const UnaryFun &unary_fun)
: d_binary_fun_(d_binary_fun), unary_fun_(unary_fun) {}
-inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
return dout * d_binary_fun_.Dy(x, unary_fun_(y));
}
inline HOSTDEVICE T UseIntermediateOut(T x,
T intermediate_out,
-T out,
+T out UNUSED,
T dout) {
return dout * d_binary_fun_.Dy(x, intermediate_out);
}
......
......@@ -18,6 +18,7 @@ limitations under the License. */
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/macros.h"
#if defined(__xpu__)
#include <xpu/runtime.h>
......@@ -259,7 +260,7 @@ template <>
struct FMaxGradDx<dtype::float16> {
HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
dtype::float16 y,
-dtype::float16 out,
+dtype::float16 out UNUSED,
dtype::float16 dout) const {
return dout * static_cast<dtype::float16>((x >= y) || dtype::isnan(y));
}
......@@ -267,7 +268,7 @@ struct FMaxGradDx<dtype::float16> {
template <>
struct FMaxGradDx<int> {
-HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
return dout * static_cast<int>((x >= y));
}
};
......@@ -276,7 +277,7 @@ template <>
struct FMaxGradDx<int64_t> {
HOSTDEVICE int64_t operator()(int64_t x,
int64_t y,
-int64_t out,
+int64_t out UNUSED,
int64_t dout) const {
return dout * static_cast<int64_t>((x >= y));
}
......@@ -293,7 +294,7 @@ template <>
struct FMaxGradDy<dtype::float16> {
HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
dtype::float16 y,
-dtype::float16 out,
+dtype::float16 out UNUSED,
dtype::float16 dout) const {
return dout * static_cast<dtype::float16>(!((x >= y) || dtype::isnan(y)));
}
......@@ -303,7 +304,7 @@ template <>
struct FMaxGradDy<int64_t> {
HOSTDEVICE int64_t operator()(int64_t x,
int64_t y,
-int64_t out,
+int64_t out UNUSED,
int64_t dout) const {
return dout * static_cast<int64_t>(!((x >= y)));
}
......@@ -311,7 +312,7 @@ struct FMaxGradDy<int64_t> {
template <>
struct FMaxGradDy<int> {
-HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
return dout * static_cast<int>(!((x >= y)));
}
};
......@@ -327,7 +328,7 @@ template <>
struct FMinGradDx<dtype::float16> {
HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
dtype::float16 y,
-dtype::float16 out,
+dtype::float16 out UNUSED,
dtype::float16 dout) const {
return dout * static_cast<dtype::float16>((x <= y) || dtype::isnan(y));
}
......@@ -335,7 +336,7 @@ struct FMinGradDx<dtype::float16> {
template <>
struct FMinGradDx<int> {
-HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
return dout * static_cast<int>((x <= y));
}
};
......@@ -344,7 +345,7 @@ template <>
struct FMinGradDx<int64_t> {
HOSTDEVICE int64_t operator()(int64_t x,
int64_t y,
-int64_t out,
+int64_t out UNUSED,
int64_t dout) const {
return dout * static_cast<int64_t>((x <= y));
}
......@@ -361,7 +362,7 @@ template <>
struct FMinGradDy<dtype::float16> {
HOSTDEVICE dtype::float16 operator()(dtype::float16 x,
dtype::float16 y,
-dtype::float16 out,
+dtype::float16 out UNUSED,
dtype::float16 dout) const {
return dout * static_cast<dtype::float16>(!((x <= y) || dtype::isnan(y)));
}
......@@ -369,7 +370,7 @@ struct FMinGradDy<dtype::float16> {
template <>
struct FMinGradDy<int> {
-HOSTDEVICE int operator()(int x, int y, int out, int dout) const {
+HOSTDEVICE int operator()(int x, int y, int out UNUSED, int dout) const {
return dout * static_cast<int>(!((x <= y)));
}
};
......@@ -378,7 +379,7 @@ template <>
struct FMinGradDy<int64_t> {
HOSTDEVICE int64_t operator()(int64_t x,
int64_t y,
-int64_t out,
+int64_t out UNUSED,
int64_t dout) const {
return dout * static_cast<int64_t>(!((x <= y)));
}
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#pragma once
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/macros.h"
namespace phi {
namespace funcs {
......@@ -29,7 +29,8 @@ struct ForRange {
template <>
struct ForRange<phi::CPUContext> {
-ForRange(const phi::CPUContext& dev_ctx, size_t limit) : limit_(limit) {}
+ForRange(const phi::CPUContext& dev_ctx UNUSED, size_t limit)
+: limit_(limit) {}
template <typename Function>
void operator()(Function func) const {
......
......@@ -14,9 +14,9 @@
#pragma once
#include "paddle/phi/core/macros.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
namespace phi {
namespace funcs {
......@@ -130,12 +130,12 @@ struct SumGradFunctor {
typename DY,
typename Dim>
void operator()(const DeviceContext& place,
-X* x,
-Y* y,
+X* x UNUSED,
+Y* y UNUSED,
DX* dx,
DY* dy,
const Dim& dim,
-int size) {
+int size UNUSED) {
dx->device(place) = dy->broadcast(dim);
}
};
......@@ -171,7 +171,7 @@ struct MaxOrMinGradFunctor {
DX* dx,
DY* dy,
const Dim& dim,
-int size) {
+int size UNUSED) {
auto equals = (*x) == y->broadcast(dim);
auto ones = dx->constant(1);
auto zeros = dx->constant(0);
......
......@@ -15,15 +15,15 @@
#pragma once
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/macros.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/meshgrid_grad_kernel.h"
namespace phi {
template <typename T, typename Context, int Rank>
void MeshgridBackward(const Context& ctx,
-const std::vector<const DenseTensor*>& ins,
+const std::vector<const DenseTensor*>& ins UNUSED,
const std::vector<const DenseTensor*>& out_grad,
std::vector<DenseTensor*> outs) {
int n = out_grad.size();
......
......@@ -14,10 +14,10 @@
#pragma once
#include "paddle/phi/core/macros.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/reduce_grad_functions.h"
namespace phi {
template <typename Context,
......@@ -27,11 +27,11 @@ template <typename Context,
bool kNoNeedBufferY = false>
void ComputeFromInput(const Context& dev_ctx,
const DenseTensor& x,
-const DenseTensor& out_grad,
+const DenseTensor& out_grad UNUSED,
const paddle::optional<DenseTensor>& out,
const DenseTensor& input2,
const std::vector<int64_t>& dims,
-bool keep_dim,
+bool keep_dim UNUSED,
bool reduce_all,
DenseTensor* x_grad) {
reduce_all = recompute_reduce_all(x, dims, reduce_all);
......
......@@ -54,7 +54,7 @@ void SetValueGradImpl(const Context& dev_ctx,
const IntArray& steps,
const std::vector<int64_t>& axes,
const std::vector<int64_t>& decrease_axes,
-const std::vector<int64_t>& none_axes,
+const std::vector<int64_t>& none_axes UNUSED,
DenseTensor* x_grad,
DenseTensor* value_grad) {
PADDLE_ENFORCE_EQ(
......
......@@ -17,8 +17,8 @@
#include "paddle/phi/backends/onednn/onednn_helper.h"
#include "paddle/phi/backends/onednn/onednn_reuse.h"
#include "paddle/phi/core/expect.h"
#include "paddle/phi/core/macros.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
namespace phi {
namespace onednn {
......@@ -50,7 +50,7 @@ class ConvOneDNNHandlerT
const std::string& padding_algorithm,
const std::vector<int>& dilations_in,
int groups,
-const std::string& data_format,
+const std::string& data_format UNUSED,
bool is_test,
bool is_BFLOAT16,
const std::string& fuse_activation,
......