Unverified commit 65e57a7d, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings and WITH_DISTRIBUTE flags (#53650)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent: e077678c
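
Every hunk below follows the same pattern: parameters that are declared but not read in a particular code path are annotated with the UNUSED macro (pulled in via the added include of paddle/phi/core/macros.h), so builds with -Wunused-parameter stay warning-free, and the -Wno-* warning-suppression entries in DISTRIBUTE_COMPILE_FLAGS are emptied out. Below is a minimal, self-contained sketch of the idea, assuming a GCC/Clang-style attribute definition; the real macro in macros.h may differ, and Reduce here is a hypothetical stand-in, not a Paddle API.

#include <cstddef>
#include <cstdio>

// Assumption: a GCC/Clang-style definition; the actual UNUSED macro lives
// in paddle/phi/core/macros.h and may cover more compilers.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Without the annotation, -Wall -Wextra (which enables -Wunused-parameter)
// warns that `op` is never read; with it, the signature stays unchanged and
// the warning goes away.
static int Reduce(const int* sendbuff, std::size_t count, int op UNUSED) {
  int sum = 0;
  for (std::size_t i = 0; i < count; ++i) sum += sendbuff[i];
  return sum;
}

int main() {
  const int data[3] = {1, 2, 3};
  std::printf("%d\n", Reduce(data, 3, /*op=*/0));
  return 0;
}

Compiling this sketch with g++ -Wall -Wextra produces no unused-parameter warning for op; removing the annotation brings the warning back.
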
......@@ -771,9 +771,7 @@ if(WITH_DISTRIBUTE)
heter_service_proto
fleet_executor
${BRPC_DEP})
set(DISTRIBUTE_COMPILE_FLAGS
"-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor -Wno-error=parentheses"
)
set(DISTRIBUTE_COMPILE_FLAGS "")
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
endif()
......@@ -855,9 +853,7 @@ if(WITH_DISTRIBUTE)
brpc
fleet_executor
flags)
set(DISTRIBUTE_COMPILE_FLAGS
"-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor -Wno-error=parentheses"
)
set(DISTRIBUTE_COMPILE_FLAGS "")
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
endif()
......
......@@ -225,7 +225,7 @@ class NCCLOpHandleBase : public OpHandleBase {
void* recvbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op) {
ncclRedOp_t op UNUSED) {
auto nccl_ctxs = nccl_ctxs_->GetHierarchicalInterCtx(run_order_);
int dev_id = place.device;
auto& nccl_ctx = nccl_ctxs->at(dev_id);
......@@ -297,7 +297,7 @@ class NCCLOpHandleBase : public OpHandleBase {
void* sendbuff,
size_t count,
ncclDataType_t datatype,
ncclRedOp_t op) {
ncclRedOp_t op UNUSED) {
auto nccl_ctxs = nccl_ctxs_->GetHierarchicalInterCtx(run_order_);
int dev_id = place.device;
auto& nccl_ctx = nccl_ctxs->at(dev_id);
......
......@@ -121,11 +121,11 @@ void SaveCombineTensorKernel(const Context& dev_ctx,
template <typename T, typename Context>
void SaveCombineVocabKernel(
const Context& dev_ctx,
const Context& dev_ctx UNUSED,
const std::vector<const phi::ExtendedTensor*>& inputs,
const std::string& file_path,
bool overwrite,
bool save_as_fp16,
bool save_as_fp16 UNUSED,
bool save_to_memory,
phi::ExtendedTensor* out) {
std::string* y = nullptr;
......
......@@ -24,6 +24,8 @@ limitations under the License. */
#include <stdlib.h>
#include <string.h>
#include "paddle/phi/core/macros.h"
#if (defined(__NVCC__) || defined(__HIPCC__))
#define HOSTDEVICE __host__ __device__
#define DEVICE __device__
......@@ -181,7 +183,9 @@ HOSTDEVICE static inline size_t PD_PString_ToInternalSizeT(
/*
* Need to implement in other source file.
*/
HOSTDEVICE static inline void PD_Free(void *ptr, size_t size) { free(ptr); }
HOSTDEVICE static inline void PD_Free(void *ptr, size_t size UNUSED) {
free(ptr);
}
HOSTDEVICE static inline void *PD_Memset(void *src, int ch, size_t size) {
char *dst = (char *)src; // NOLINT
......@@ -203,7 +207,7 @@ HOSTDEVICE static inline void *PD_Memcpy(void *dst,
HOSTDEVICE static inline void *PD_Malloc(size_t size) { return malloc(size); }
HOSTDEVICE static inline void *PD_Realloc(void *ptr,
size_t old_size,
size_t old_size UNUSED,
size_t new_size) {
#if (defined(__NVCC__) || defined(__HIPCC__))
if (old_size >= new_size) {
......
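
One detail worth noting in the hunk above: PD_Realloc still reads old_size inside the __NVCC__/__HIPCC__ branch even though the parameter is now marked UNUSED. With a GCC/Clang-style attribute this is fine, because __attribute__((unused)) only means "possibly unused" and has no effect when the parameter is actually used. A small sketch under that assumption follows; GrowBuffer and COPY_ON_GROW are hypothetical names standing in for PD_Realloc and the __NVCC__/__HIPCC__ guard, not part of Paddle.

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Assumption: UNUSED expands to a GCC/Clang "possibly unused" attribute,
// as in the earlier sketch; other compilers get an empty definition.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Mirrors the PD_Realloc situation: `old_size` is only read when
// COPY_ON_GROW is defined. The annotation silences -Wunused-parameter in
// the plain build and is simply ignored in the build that uses the value.
static void* GrowBuffer(void* ptr, std::size_t old_size UNUSED,
                        std::size_t new_size) {
#if defined(COPY_ON_GROW)
  void* fresh = std::malloc(new_size);
  if (fresh != nullptr && ptr != nullptr) {
    std::memcpy(fresh, ptr, old_size < new_size ? old_size : new_size);
    std::free(ptr);
  }
  return fresh;
#else
  return std::realloc(ptr, new_size);
#endif
}

int main() {
  void* buf = GrowBuffer(nullptr, 0, 16);
  std::free(buf);
  return 0;
}
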
......@@ -24,20 +24,20 @@
namespace phi {
template <typename T, typename Context>
void PRecvKernel(const Context& dev_ctx,
int peer,
DataType dtype,
bool dynamic_shape,
DenseTensor* out) {
void PRecvKernel(const Context& dev_ctx UNUSED,
int peer UNUSED,
DataType dtype UNUSED,
bool dynamic_shape UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(errors::Unavailable("Do not support recv for cpu kernel now."));
}
template <typename T, typename Context>
void PRecvArrayKernel(const Context& dev_ctx,
int peer,
DataType dtype,
const std::vector<int>& out_shape,
TensorArray* out_array) {
void PRecvArrayKernel(const Context& dev_ctx UNUSED,
int peer UNUSED,
DataType dtype UNUSED,
const std::vector<int>& out_shape UNUSED,
TensorArray* out_array UNUSED) {
PADDLE_THROW(
errors::Unavailable("Do not support recv array for cpu kernel now."));
}
......
......@@ -24,19 +24,19 @@
namespace phi {
template <typename T, typename Context>
void PSendKernel(const Context& dev_ctx,
const DenseTensor& x,
int peer,
bool dynamic_shape) {
void PSendKernel(const Context& dev_ctx UNUSED,
const DenseTensor& x UNUSED,
int peer UNUSED,
bool dynamic_shape UNUSED) {
PADDLE_THROW(errors::Unavailable("Do not support send for cpu kernel now."));
}
template <typename T, typename Context>
void PSendArrayKernel(const Context& dev_ctx,
const TensorArray& x,
int peer,
bool dynamic_shape,
DenseTensor* out) {
void PSendArrayKernel(const Context& dev_ctx UNUSED,
const TensorArray& x UNUSED,
int peer UNUSED,
bool dynamic_shape UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(
errors::Unavailable("Do not support send array for cpu kernel now."));
}
......
......@@ -553,28 +553,28 @@ struct GradLayer {
}
virtual void operator()(
const CPUContext& dev_ctx,
const DenseTensor* input,
const DenseTensor* output,
const std::vector<DenseTensor>& init_h_unbind,
const std::vector<DenseTensor>& init_c_unbind,
const std::vector<DenseTensor>& last_h_grad_unbind,
const std::vector<DenseTensor>& last_c_grad_unbind,
const std::vector<DenseTensor>& gate_tensor_unbind,
const std::vector<DenseTensor>& state_tensor_unbind,
const std::vector<DenseTensor>& act_state_tensor_unbind,
const DenseTensor* output_grad,
const std::vector<std::vector<DenseTensor>>& parameter_lists,
const DenseTensor* sequence_length,
DenseTensor* input_grad,
std::vector<DenseTensor>* init_h_grad_unbind,
std::vector<DenseTensor>* init_c_grad_unbind,
const std::vector<std::vector<DenseTensor>>& weight_list_grad,
int layer_idx,
bool is_bidirec,
int hidden_size,
const std::string& mode,
int gate_num) {}
const CPUContext& dev_ctx UNUSED,
const DenseTensor* input UNUSED,
const DenseTensor* output UNUSED,
const std::vector<DenseTensor>& init_h_unbind UNUSED,
const std::vector<DenseTensor>& init_c_unbind UNUSED,
const std::vector<DenseTensor>& last_h_grad_unbind UNUSED,
const std::vector<DenseTensor>& last_c_grad_unbind UNUSED,
const std::vector<DenseTensor>& gate_tensor_unbind UNUSED,
const std::vector<DenseTensor>& state_tensor_unbind UNUSED,
const std::vector<DenseTensor>& act_state_tensor_unbind UNUSED,
const DenseTensor* output_grad UNUSED,
const std::vector<std::vector<DenseTensor>>& parameter_lists UNUSED,
const DenseTensor* sequence_length UNUSED,
DenseTensor* input_grad UNUSED,
std::vector<DenseTensor>* init_h_grad_unbind UNUSED,
std::vector<DenseTensor>* init_c_grad_unbind UNUSED,
const std::vector<std::vector<DenseTensor>>& weight_list_grad UNUSED,
int layer_idx UNUSED,
bool is_bidirec UNUSED,
int hidden_size UNUSED,
const std::string& mode UNUSED,
int gate_num UNUSED) {}
void preprocess(const CPUContext& dev_ctx,
const DenseTensor* grad_output,
......@@ -978,11 +978,11 @@ void RnnGradFunc(const CPUContext& dev_ctx,
const std::vector<const DenseTensor*>& state_grad,
float dropout_prob,
bool is_bidirec,
int input_size,
int input_size UNUSED,
int hidden_size,
int num_layers,
const std::string& mode,
int seed,
int seed UNUSED,
bool is_test,
int gate_num,
DenseTensor* x_grad,
......
......@@ -292,22 +292,22 @@ struct Layer {
}
}
virtual void operator()(const CPUContext& dev_ctx,
const DenseTensor* input,
const std::vector<DenseTensor>& vec,
const std::vector<DenseTensor>& init_h,
const std::vector<DenseTensor>& init_c,
const DenseTensor* sequence_length,
std::vector<DenseTensor> last_h,
std::vector<DenseTensor> last_c,
DenseTensor* output,
const int& layer_idx,
const int& gate_num,
DenseTensor* gate_value,
DenseTensor* cell_value,
DenseTensor* cell_act_value,
const std::string& mode,
bool is_test) {}
virtual void operator()(const CPUContext& dev_ctx UNUSED,
const DenseTensor* input UNUSED,
const std::vector<DenseTensor>& vec UNUSED,
const std::vector<DenseTensor>& init_h UNUSED,
const std::vector<DenseTensor>& init_c UNUSED,
const DenseTensor* sequence_length UNUSED,
std::vector<DenseTensor> last_h UNUSED,
std::vector<DenseTensor> last_c UNUSED,
DenseTensor* output UNUSED,
const int& layer_idx UNUSED,
const int& gate_num UNUSED,
DenseTensor* gate_value UNUSED,
DenseTensor* cell_value UNUSED,
DenseTensor* cell_act_value UNUSED,
const std::string& mode UNUSED,
bool is_test UNUSED) {}
void RunTestIter(const CPUContext& dev_ctx,
const DenseTensor* input,
......@@ -320,8 +320,8 @@ struct Layer {
DenseTensor* output,
int layer_idx,
DenseTensor* gate_value,
DenseTensor* cell_value,
DenseTensor* cell_act_value,
DenseTensor* cell_value UNUSED,
DenseTensor* cell_act_value UNUSED,
bool is_bidirect,
int offset,
const std::string& mode) {
......@@ -701,7 +701,7 @@ struct SingleLayer : public Layer<T, CellType> {
std::vector<DenseTensor> last_c,
DenseTensor* output,
const int& layer_idx,
const int& gate_num,
const int& gate_num UNUSED,
DenseTensor* gate_value,
DenseTensor* cell_value,
DenseTensor* cell_act_value,
......@@ -740,7 +740,7 @@ struct BidirLayer : public Layer<T, CellType> {
std::vector<DenseTensor> last_c,
DenseTensor* output,
const int& layer_idx,
const int& gate_num,
const int& gate_num UNUSED,
DenseTensor* gate_value,
DenseTensor* cell_value,
DenseTensor* cell_act_value,
......
......@@ -32,7 +32,7 @@ namespace phi {
template <typename Context, typename T, typename IndexT>
void CalculateXGrad(const Context& ctx,
const T* out_grad,
const T* x_data,
const T* x_data UNUSED,
const T* e_data,
const phi::DDim& out_grad_dims,
const phi::DDim& x_dims,
......@@ -46,7 +46,7 @@ void CalculateXGrad(const Context& ctx,
const DenseTensor& out_grad_tensor,
DenseTensor* x_grad_tensor,
const DenseTensor* dst_count = nullptr,
const DenseTensor* out = nullptr) {
const DenseTensor* out UNUSED = nullptr) {
std::vector<int64_t> reduce_idx;
bool reduce = ReduceGrad(out_grad_dims, x_dims, reduce_idx);
......@@ -232,7 +232,7 @@ void CalculateXGrad(const Context& ctx,
template <typename T, typename IndexT>
void CalculateEGrad(const T* out_grad_data,
const T* x_data,
const T* e_data,
const T* e_data UNUSED,
const phi::DDim& x_dims,
const phi::DDim& e_dims,
const IndexT* s_index,
......@@ -308,7 +308,7 @@ void CalculateXEGradForMinMax(const T* out_grad,
const IndexT* s_index,
const IndexT* d_index,
const std::string& message_op,
const std::string& reduce_op,
const std::string& reduce_op UNUSED,
int64_t index_size,
T* x_grad,
T* e_grad,
......
......@@ -35,7 +35,7 @@ void CalculateGrad(const Context& ctx,
int64_t index_size,
int64_t slice_size,
T* x_grad,
const DenseTensor& out_grad_tensor,
const DenseTensor& out_grad_tensor UNUSED,
const DenseTensor& y) {
std::vector<int64_t> reduce_idx;
bool reduce = ReduceGrad(out_grad_dims, x_grad_dims, reduce_idx);
......
......@@ -111,7 +111,7 @@ struct UniqueConsecutiveFlattenedTensorFunctor {
template <typename Context, class ForwardIt, typename InT, typename IndexT>
static ForwardIt UniqueConsecutiveDimImpl(
const Context& context,
const Context& context UNUSED,
ForwardIt first,
ForwardIt last,
const std::vector<IndexT>& sorted_indices_vec,
......
......@@ -113,7 +113,7 @@ template <typename Context,
class CompareFunctor,
typename T>
struct GetMask {
void operator()(const Context& dev_ctx,
void operator()(const Context& dev_ctx UNUSED,
const DenseTensor& lhs,
const DenseTensor& rhs,
DenseTensor* mask) {
......
......@@ -597,7 +597,7 @@ struct SquareGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
dx.device(d) = dout * static_cast<T>(2) * x;
}
......@@ -1087,7 +1087,7 @@ struct ExpGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = dout * out;
}
......
......@@ -47,7 +47,7 @@ struct UnaryCompoundFunctor {
inline HOSTDEVICE T GetOut(T x, T y) { return func1_(func2_(x, y)); }
inline HOSTDEVICE T GetOutUseIntermediateOut(T x, T intermediat_out) {
inline HOSTDEVICE T GetOutUseIntermediateOut(T x UNUSED, T intermediat_out) {
return func1_(intermediat_out);
}
......
......@@ -475,7 +475,7 @@ inline void vec_add_bias<float, backends::cpu::avx512f>(const int n,
}
template <typename T, backends::cpu::cpu_isa_t isa = backends::cpu::isa_any>
inline void vec_identity(const int n, const T* x, T* y) {
inline void vec_identity(const int n UNUSED, const T* x UNUSED, T* y UNUSED) {
// do nothing
return;
}
......
......@@ -32,9 +32,9 @@ template <typename T>
struct StridedMemcpyFunctor<T, 0> {
void operator()(const phi::DeviceContext& dev_ctx,
const T* src,
const int64_t* src_stride,
const int64_t* dst_dim,
const int64_t* dst_stride,
const int64_t* src_stride UNUSED,
const int64_t* dst_dim UNUSED,
const int64_t* dst_stride UNUSED,
T* dst) const {
auto place = dev_ctx.GetPlace();
if (place.GetType() == phi::AllocationType::CPU) {
......@@ -58,9 +58,9 @@ template <typename T>
struct StridedMemcpyFunctor<T, 1> {
void operator()(const phi::DeviceContext& dev_ctx,
const T* src,
const int64_t* src_stride,
const int64_t* src_stride UNUSED,
const int64_t* dst_dim,
const int64_t* dst_stride,
const int64_t* dst_stride UNUSED,
T* dst) const {
auto place = dev_ctx.GetPlace();
if (place.GetType() == phi::AllocationType::CPU) {
......
......@@ -246,10 +246,10 @@ bool MatMulKernel<double>::CanBeUsed(const matmul_attr_t& attr) const {
return true;
}
#define AWALYS_USE_ME_WITH_DOUBLE(func) \
template <> \
bool func##Kernel<double>::CanBeUsed(const int& d) const { \
return true; \
#define AWALYS_USE_ME_WITH_DOUBLE(func) \
template <> \
bool func##Kernel<double>::CanBeUsed(const int& d UNUSED) const { \
return true; \
}
AWALYS_USE_ME_WITH_DOUBLE(VMul);
......
......@@ -72,7 +72,7 @@ elementwise_inner_add(const phi::CPUContext& ctx UNUSED,
* return: output tensor
*/
template <typename T, typename IndexT = int>
void ScatterAssign(const phi::CPUContext& ctx,
void ScatterAssign(const phi::CPUContext& ctx UNUSED,
const DenseTensor& src,
const DenseTensor& index,
DenseTensor* output) {
......@@ -241,7 +241,7 @@ void ScatterAssignAdd(const phi::CPUContext& ctx,
// The function is only for scatter grad x,
// however update grad use gather
template <typename T, typename IndexT = int>
void CPUScatterGradForX(const phi::CPUContext& ctx,
void CPUScatterGradForX(const phi::CPUContext& ctx UNUSED,
const DenseTensor& index,
DenseTensor* output) {
int64_t index_size = index.dims()[0];
......
......@@ -136,7 +136,7 @@ void ProductRuleBook(const Context& dev_ctx,
template <typename T, typename Context, typename IntT = int>
void UpdateRulebookAndOutIndex(const Context& dev_ctx,
const SparseCooTensor& x,
const int kernel_size,
const int kernel_size UNUSED,
const int out_channels,
const DDim& out_dims,
DenseTensor* rulebook,
......