Unverified Commit 58435ae5 authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53397)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent 2039115c
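For context, every hunk below applies the same fix: a parameter that is deliberately ignored is tagged with Paddle's UNUSED macro so that -Wunused-parameter no longer fires, while the function signature stays unchanged. A minimal self-contained sketch of the mechanism follows (the macro definition and the demo function name are illustrative assumptions, not Paddle's actual code):

#include <cstdio>
#include <string>

// Illustrative stand-in for an UNUSED macro (assumption): expands to the
// GCC/Clang "unused" attribute where supported, and to nothing elsewhere.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Built with -Wall -Wextra, 'op_name' would normally trigger
// -Wunused-parameter; the UNUSED tag suppresses the diagnostic.
void AmpAutoCastDemo(const std::string& inputs_name,
                     std::string op_name UNUSED) {
  // Only inputs_name is used; op_name is kept for interface stability.
  std::printf("casting inputs(%s)\n", inputs_name.c_str());
}

int main() {
  AmpAutoCastDemo("X", "relu");
  return 0;
}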
@@ -43,7 +43,7 @@ inline std::vector<paddle::Tensor> AmpAutoCasts(
     const std::string& inputs_name,
     const std::vector<paddle::Tensor>& inputs,
     const phi::DataType& dst_dtype,
-    std::string op_name) {
+    std::string op_name UNUSED) {
   VLOG(6) << "AMP AmpAutoCasts:"
           << " inputs(" << inputs_name << ") dst_dtype("
           << phi::DataTypeToString(dst_dtype) << ").";
@@ -1147,7 +1147,7 @@ struct ReluGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * (out > static_cast<T>(0)).template cast<T>();
   }
@@ -247,7 +247,7 @@ struct UnaryCompoundGradDIntermediateFunctor {
     }
   }
-  inline HOSTDEVICE T UseIntermediateOut(T x,
+  inline HOSTDEVICE T UseIntermediateOut(T x UNUSED,
                                          T intermediate_out,
                                          T out,
                                          T dout) {
@@ -112,8 +112,8 @@ struct MeanGradFunctor {
             typename DY,
             typename Dim>
   void operator()(const DeviceContext& place,
-                  X* x,
-                  Y* y,
+                  X* x UNUSED,
+                  Y* y UNUSED,
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
@@ -142,7 +142,7 @@ class FusedMatmulOneDNNHandler
   float ComputeOutputScale(float matmul_alpha,
                            const float scale_x,
                            const float scale_y,
-                           const float scale_in_eltwise,
+                           const float scale_in_eltwise UNUSED,
                            const float scale_out,
                            const bool force_fp32_output) {
     float f_scale_out = force_fp32_output ? 1.0f : scale_out;
@@ -357,7 +357,7 @@ void SliceArrayGradKernel(const Context& dev_ctx,
                           const TensorArray& input,
                           const TensorArray& out_grad,
                           const IntArray& starts,
-                          const IntArray& ends,
+                          const IntArray& ends UNUSED,
                           TensorArray* input_grad) {
   int64_t d_in_size = input.size();
   input_grad->resize(d_in_size);
@@ -22,9 +22,9 @@
 namespace phi {
 template <typename T, typename Context>
-void NPUIdentityKernel(const Context& dev_ctx,
+void NPUIdentityKernel(const Context& dev_ctx UNUSED,
                        const DenseTensor& x,
-                       const int format,
+                       const int format UNUSED,
                        DenseTensor* out) {
   VLOG(4) << "npu_identity op is only for NPU, please avoid using this kernel!";
   out->ShareDataWith(x);
@@ -20,7 +20,7 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void ValuesCooGradKernel(const Context& dev_ctx,
+void ValuesCooGradKernel(const Context& dev_ctx UNUSED,
                         const SparseCooTensor& x,
                         const DenseTensor& out_grad,
                         SparseCooTensor* x_grad) {
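As a side note, on C++17 toolchains the same warning can also be silenced with the standard [[maybe_unused]] attribute, or by leaving the parameter unnamed. A brief hypothetical sketch of both alternatives (not part of this commit; function names are made up):

#include <string>

// Standard C++17 attribute, portable across compilers.
void LogTag([[maybe_unused]] const std::string& tag) {}

// Unnamed parameter: with no name, no unused-parameter warning can fire.
void LogTag2(const std::string& /*tag*/) {}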