Unverified · Commit 39f365c4 authored by Galaxy1458, committed by GitHub

test,test=develop (#53847)

Parent 16fcbb9b
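This commit (PR #53847) marks parameters that are kept for interface compatibility but never read with Paddle's UNUSED macro, presumably so these CPU code paths compile cleanly under -Wunused-parameter-style warnings. As a minimal sketch (an illustration only, not Paddle's exact definition, which lives in its own macro headers), such a macro is commonly defined along these lines:

    // Hypothetical sketch of an UNUSED macro; Paddle's real definition may differ.
    // GCC and Clang honor __attribute__((unused)); other compilers get a no-op.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

Every hunk below only appends UNUSED to a parameter declaration, so no runtime behavior changes.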
......
@@ -34,7 +34,7 @@ namespace phi {

 template <typename Context, typename T, typename IndType>
 struct Argmax {
-  void operator()(const Context& dev_ctx,
+  void operator()(const Context& dev_ctx UNUSED,
                   const DenseTensor& input,
                   DenseTensor* out_idx,
                   DenseTensor* out,
......
......
@@ -128,14 +128,14 @@ void YoloLossGradKernel(const Context& dev_ctx,
                         const std::vector<int>& anchors,
                         const std::vector<int>& anchor_mask,
                         int class_num,
-                        float ignore_thresh,
+                        float ignore_thresh UNUSED,
                         int downsample_ratio,
                         bool use_label_smooth,
-                        float scale_x_y,
+                        float scale_x_y UNUSED,
                         DenseTensor* x_grad,
-                        DenseTensor* gt_box_grad,
-                        DenseTensor* gt_label_grad,
-                        DenseTensor* gt_score_grad) {
+                        DenseTensor* gt_box_grad UNUSED,
+                        DenseTensor* gt_label_grad UNUSED,
+                        DenseTensor* gt_score_grad UNUSED) {
   auto* input = &x;
   auto input_grad = x_grad;
   auto* objness_mask = &objectness_mask;
......
......
@@ -1651,7 +1651,7 @@ struct ELUGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     // case 1: alpha >= 0
     // dx = dout, if out > 0
     // dx = dout * (out + alpha), if out <= 0
......
......
@@ -455,7 +455,7 @@ class SparseAdamFunctor<T, CPUAdam, T> {
                     const int64_t* rows,
                     int64_t row_numel,
                     int64_t row_count,
-                    bool lazy_mode)
+                    bool lazy_mode UNUSED)
       : beta1_(beta1),
         beta2_(beta2),
         epsilon_(epsilon),
......
......
@@ -71,7 +71,7 @@ struct CBlas<phi::dtype::bfloat16> {
   }

   template <typename... ARGS>
-  static void VCOPY(ARGS... args) {
+  static void VCOPY(ARGS... args UNUSED) {
     PADDLE_THROW(phi::errors::Unimplemented(
         "Blas VCOPY do not supported on CPU with bfloat16,"
         " please check your code"));
......
......
@@ -99,7 +99,7 @@ inline void ModulatedDeformableIm2colCPUKernel(
 }

 template <typename T, typename Context>
-void ModulatedDeformableIm2col(const Context& dev_ctx,
+void ModulatedDeformableIm2col(const Context& dev_ctx UNUSED,
                                const T* data_im,
                                const T* data_offset,
                                const T* data_mask,
......
......
@@ -932,13 +932,13 @@ inline void gru_backward(const Context &context,

 template <class OpGruGrad, typename T, typename Context>
 inline void cpu_gru_backward(const Context &context,
-                             OpGruGrad op_gru_grad,
+                             OpGruGrad op_gru_grad UNUSED,
                              phi::funcs::GRUMetaValue<T> value,
                              phi::funcs::GRUMetaGrad<T> grad,
                              int frame_size,
                              int batch_size,
-                             ActivationType active_node,
-                             ActivationType active_gate) {
+                             ActivationType active_node UNUSED,
+                             ActivationType active_gate UNUSED) {
   for (int b = 0; b < batch_size; ++b) {
     // eigen
     gru_backward(context, value, grad, frame_size);
......
......
@@ -274,7 +274,7 @@ struct FMaxFunctor<int64_t> {

 template <typename T>
 struct FMaxGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>((x >= y) || isnan(y));
   }
 };
......
@@ -308,7 +308,7 @@ struct FMaxGradDx<int64_t> {

 template <typename T>
 struct FMaxGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(!((x >= y) || isnan(y)));
   }
 };
......
@@ -342,7 +342,7 @@ struct FMaxGradDy<int> {

 template <typename T>
 struct FMinGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>((x <= y) || isnan(y));
   }
 };
......
@@ -376,7 +376,7 @@ struct FMinGradDx<int64_t> {

 template <typename T>
 struct FMinGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(!((x <= y) || isnan(y)));
   }
 };
......
......
@@ -210,7 +210,7 @@ static T compute_factor(size_t size, FFTNormMode normalization) {

 template <typename Ti, typename To>
 struct FFTC2CFunctor<phi::CPUContext, Ti, To> {
-  void operator()(const phi::CPUContext& ctx,
+  void operator()(const phi::CPUContext& ctx UNUSED,
                   const DenseTensor& x,
                   DenseTensor* out,
                   const std::vector<int64_t>& axes,
......
@@ -253,7 +253,7 @@ struct FFTC2CFunctor<phi::CPUContext, Ti, To> {

 template <typename Ti, typename To>
 struct FFTR2CFunctor<phi::CPUContext, Ti, To> {
-  void operator()(const phi::CPUContext& ctx,
+  void operator()(const phi::CPUContext& ctx UNUSED,
                   const DenseTensor& x,
                   DenseTensor* out,
                   const std::vector<int64_t>& axes,
......
@@ -310,7 +310,7 @@ struct FFTR2CFunctor<phi::CPUContext, Ti, To> {

 template <typename Ti, typename To>
 struct FFTC2RFunctor<phi::CPUContext, Ti, To> {
-  void operator()(const phi::CPUContext& ctx,
+  void operator()(const phi::CPUContext& ctx UNUSED,
                   const DenseTensor& x,
                   DenseTensor* out,
                   const std::vector<int64_t>& axes,
......
......
@@ -21,14 +21,14 @@ namespace phi {
 namespace funcs {
 template <typename T>
 struct MulGradFunctor {
-  inline HOSTDEVICE T Dx(T x, T y) { return y; }
-  inline HOSTDEVICE T Dy(T x, T y) { return x; }
+  inline HOSTDEVICE T Dx(T x UNUSED, T y) { return y; }
+  inline HOSTDEVICE T Dy(T x, T y UNUSED) { return x; }
 };

 template <typename T>
 struct AddGradFunctor {
-  inline HOSTDEVICE T Dx(T x, T y) { return static_cast<T>(1.); }
-  inline HOSTDEVICE T Dy(T x, T y) { return static_cast<T>(1.); }
+  inline HOSTDEVICE T Dx(T x UNUSED, T y UNUSED) { return static_cast<T>(1.); }
+  inline HOSTDEVICE T Dy(T x UNUSED, T y UNUSED) { return static_cast<T>(1.); }
 };

 template <typename T>
......
@@ -48,9 +48,9 @@ template <typename T>
 struct ScaleGradFunctor {
   explicit ScaleGradFunctor(T coeff) : coeff_(coeff) {}

-  inline HOSTDEVICE T UseX(T x) { return coeff_; }
-  inline HOSTDEVICE T UseOut(T out) { return coeff_; }
-  inline HOSTDEVICE T UseXAndOut(T x, T out) { return coeff_; }
+  inline HOSTDEVICE T UseX(T x UNUSED) { return coeff_; }
+  inline HOSTDEVICE T UseOut(T out UNUSED) { return coeff_; }
+  inline HOSTDEVICE T UseXAndOut(T x UNUSED, T out UNUSED) { return coeff_; }

  private:
   T coeff_;
......
@@ -71,7 +71,7 @@ struct ReluGradFunctor {
   inline HOSTDEVICE T UseOut(T out) {
     return out > static_cast<T>(0) ? static_cast<T>(1) : static_cast<T>(0);
   }
-  inline HOSTDEVICE T UseXAndOut(T x, T out) {
+  inline HOSTDEVICE T UseXAndOut(T x UNUSED, T out) {
     return out > static_cast<T>(0) ? static_cast<T>(1) : static_cast<T>(0);
   }
 };
......
@@ -93,7 +93,7 @@ template <typename T>
 struct TanhGradFunctor {
   inline HOSTDEVICE T UseX(T x) { return static_cast<T>(1) - x * x; }
   inline HOSTDEVICE T UseOut(T out) { return static_cast<T>(1) - out * out; }
-  inline HOSTDEVICE T UseXAndOut(T x, T out) {
+  inline HOSTDEVICE T UseXAndOut(T x UNUSED, T out) {
     return static_cast<T>(1) - out * out;
   }
 };
......
@@ -113,7 +113,7 @@ template <typename T>
 struct SigmoidGradFunctor {
   inline HOSTDEVICE T UseX(T x) { return x * (static_cast<T>(1) - x); }
   inline HOSTDEVICE T UseOut(T out) { return out * (static_cast<T>(1) - out); }
-  inline HOSTDEVICE T UseXAndOut(T x, T out) {
+  inline HOSTDEVICE T UseXAndOut(T x UNUSED, T out) {
     return out * (static_cast<T>(1) - out);
   }
 };
......
@@ -161,7 +161,7 @@ struct GeluGradFunctor {
            static_cast<MT>(0.5) * (static_cast<MT>(1) + tanh_out);
     return static_cast<T>(ans);
   }
-  inline HOSTDEVICE T UseXAndOut(T x, T out) {
+  inline HOSTDEVICE T UseXAndOut(T x, T out UNUSED) {
     MT mx = static_cast<MT>(x);
     MT tanh_out =
         tanh(static_cast<MT>(0.79788456) * mx *
......
......
@@ -30,7 +30,7 @@ namespace funcs {
 template <class T, typename DeviceContext>
 class Im2ColFunctor<phi::funcs::ColFormat::kCFO, DeviceContext, T> {
  public:
-  void operator()(const DeviceContext& context,
+  void operator()(const DeviceContext& context UNUSED,
                   const phi::DenseTensor& im,
                   const std::vector<int>& dilation,
                   const std::vector<int>& stride,
......
@@ -75,7 +75,7 @@ class Im2ColFunctor<phi::funcs::ColFormat::kCFO, DeviceContext, T> {
 template <class T, typename DeviceContext>
 class Col2ImFunctor<phi::funcs::ColFormat::kCFO, DeviceContext, T> {
  public:
-  void operator()(const DeviceContext& context,
+  void operator()(const DeviceContext& context UNUSED,
                   const phi::DenseTensor& col,
                   const std::vector<int>& dilation,
                   const std::vector<int>& stride,
......
@@ -175,13 +175,13 @@ template class Col2ImFunctor<phi::funcs::ColFormat::kCFO,
 template <class T, typename DeviceContext>
 class Im2ColFunctor<phi::funcs::ColFormat::kOCF, DeviceContext, T> {
  public:
-  void operator()(const DeviceContext& context,
+  void operator()(const DeviceContext& context UNUSED,
                   const phi::DenseTensor& im,
-                  const std::vector<int>& dilation,
+                  const std::vector<int>& dilation UNUSED,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding,
                   phi::DenseTensor* col,
-                  const DataLayout data_layout) {
+                  const DataLayout data_layout UNUSED) {
     PADDLE_ENFORCE_EQ(im.dims().size(),
                       3,
                       phi::errors::InvalidArgument(
......
@@ -248,13 +248,13 @@ class Im2ColFunctor<phi::funcs::ColFormat::kOCF, DeviceContext, T> {
 template <class T, typename DeviceContext>
 class Col2ImFunctor<phi::funcs::ColFormat::kOCF, DeviceContext, T> {
  public:
-  void operator()(const DeviceContext& context,
+  void operator()(const DeviceContext& context UNUSED,
                   const phi::DenseTensor& col,
-                  const std::vector<int>& dilation,
+                  const std::vector<int>& dilation UNUSED,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding,
                   phi::DenseTensor* im,
-                  const DataLayout data_layout) {
+                  const DataLayout data_layout UNUSED) {
     PADDLE_ENFORCE_EQ(im->dims().size(),
                       3,
                       phi::errors::InvalidArgument(
......
......
@@ -31,7 +31,7 @@ struct IsNanFunctor {
 template <typename T>
 struct IsNanFunctor<T,
                     typename std::enable_if<std::is_integral<T>::value>::type> {
-  HOSTDEVICE bool operator()(const T& a) const { return false; }
+  HOSTDEVICE bool operator()(const T& a UNUSED) const { return false; }
 };

 // isnan is defined in namespace std in float16.h, but
......
@@ -66,7 +66,7 @@ struct IsInfFunctor {
 template <typename T>
 struct IsInfFunctor<T,
                     typename std::enable_if<std::is_integral<T>::value>::type> {
-  HOSTDEVICE bool operator()(const T& a) const { return false; }
+  HOSTDEVICE bool operator()(const T& a UNUSED) const { return false; }
 };

 template <>
......
@@ -98,7 +98,7 @@ template <typename T>
 struct IsFiniteFunctor<
     T,
     typename std::enable_if<std::is_integral<T>::value>::type> {
-  HOSTDEVICE bool operator()(const T& a) const { return true; }
+  HOSTDEVICE bool operator()(const T& a UNUSED) const { return true; }
 };

 template <>
......
......
@@ -126,7 +126,9 @@ class ColwiseSum2D<phi::GPUContext, T> {
 template <typename T>
 class ColwiseSum2D<phi::CPUContext, T> {
  public:
-  ColwiseSum2D(int left, int right, const phi::CPUContext& dev_ctx) {}
+  ColwiseSum2D(int left UNUSED,
+               int right UNUSED,
+               const phi::CPUContext& dev_ctx UNUSED) {}

   void operator()(const phi::CPUContext& context,
                   const DenseTensor& input,
......
......
@@ -243,7 +243,7 @@ template struct RowwiseMean<phi::CPUContext, double>;

 template <typename T>
 struct RowwiseAdd<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& input,
                   const phi::DenseTensor& vector,
                   phi::DenseTensor* output) {
......
......
@@ -1622,10 +1622,10 @@ class MaxPool2dWithIndexGradFunctor<CPUContext, T1, T2> {
   void operator()(const CPUContext& context,
                   const DenseTensor& output_grad,
                   const DenseTensor& mask,
-                  const std::vector<int>& ksize,
-                  const std::vector<int>& strides,
-                  const std::vector<int>& paddings,
-                  bool adaptive,
+                  const std::vector<int>& ksize UNUSED,
+                  const std::vector<int>& strides UNUSED,
+                  const std::vector<int>& paddings UNUSED,
+                  bool adaptive UNUSED,
                   DenseTensor* input_grad) {
     const int batch_size = input_grad->dims()[0];
     const int input_height = input_grad->dims()[2];
......
@@ -1775,10 +1775,10 @@ class MaxPool3dWithIndexGradFunctor<CPUContext, T1, T2> {
   void operator()(const CPUContext& context,
                   const DenseTensor& output_grad,
                   const DenseTensor& mask,
-                  const std::vector<int>& ksize,
-                  const std::vector<int>& strides,
-                  const std::vector<int>& paddings,
-                  bool adaptive,
+                  const std::vector<int>& ksize UNUSED,
+                  const std::vector<int>& strides UNUSED,
+                  const std::vector<int>& paddings UNUSED,
+                  bool adaptive UNUSED,
                   DenseTensor* input_grad) {
     const int batch_size = input_grad->dims()[0];
     const int input_depth = input_grad->dims()[2];
......
......
@@ -45,7 +45,7 @@ class MaxPool {
  public:
   DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
   HOSTDEVICE inline void compute(const T& x, T* y) { *y = *y > x ? *y : x; }
-  DEVICE inline void finalize(const T& pool_field, T* y) {}
+  DEVICE inline void finalize(const T& pool_field UNUSED, T* y UNUSED) {}
 };

 template <class T>
......
@@ -59,7 +59,7 @@ class AvgPool {
     return static_cast<T>(0);
   }

-  DEVICE inline void compute(const T& x, T* y) {
+  DEVICE inline void compute(const T& x, T* y UNUSED) {
     intermediate_res += static_cast<MT>(x);
   }

......
@@ -83,7 +83,7 @@ class AvgPoolGrad {
  public:
   static constexpr bool use_x = false;
   HOSTDEVICE inline void compute(
-      const T& x, const T& y, const T& dy, T scale, T* dx) {
+      const T& x UNUSED, const T& y UNUSED, const T& dy, T scale, T* dx) {
     *dx += (scale * dy);
   }
 };
......
......
......@@ -420,7 +420,7 @@ typename std::enable_if<!std::is_integral<T>::value>::type elementwise_add_to(
template <typename T, typename DeviceContext>
typename std::enable_if<std::is_integral<T>::value>::type elementwise_add_to(
phi::funcs::BlasT<DeviceContext, T>* blas,
phi::funcs::BlasT<DeviceContext, T>* blas UNUSED,
size_t data_len,
const T* in,
T* out) {
......
......
@@ -20,7 +20,7 @@ namespace funcs {
 template <typename T>
 class CopyMatrixRowsFunctor<phi::CPUContext, T> {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& src,
                   phi::Vector<size_t> index_lod,
                   phi::DenseTensor* dst,
......
......
@@ -36,7 +36,7 @@ using EigenMatrix = phi::EigenMatrix<T, MajorType, IndexType>;
 template <typename T, bool is_test>
 class MaxSeqPoolFunctor {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& input,
                   T pad_value,
                   phi::DenseTensor* output,
......
@@ -115,11 +115,11 @@ class MaxSeqPoolFunctor {
 template <typename T>
 class MaxSeqPoolFunctor<T, true> {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& input,
                   T pad_value,
                   phi::DenseTensor* output,
-                  phi::DenseTensor* index) {
+                  phi::DenseTensor* index UNUSED) {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
     PADDLE_ENFORCE_GT(in_dims.size(),
......
@@ -239,7 +239,7 @@ class MaxSeqPoolGradFunctor {
 template <typename T>
 class LastSeqPoolFunctor {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& input,
                   T pad_value,
                   phi::DenseTensor* output) {
......
@@ -273,7 +273,7 @@ class LastSeqPoolFunctor {
 template <typename T>
 class FirstSeqPoolFunctor {
  public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& input,
                   T pad_value,
                   phi::DenseTensor* output) {
......