Unverified commit 58435ae5, authored by Galaxy1458, committed by GitHub

Remove some [-Wunused-parameter] warnings (#53397)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent 2039115c
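For context, each hunk below marks a parameter that is accepted for interface uniformity but never read in the function body, which is what `-Wunused-parameter` flags. A minimal, self-contained sketch of the pattern follows; the `LogCast` function and the macro definition are illustrative assumptions, not Paddle's actual code, though Paddle's own `UNUSED` macro typically expands to `__attribute__((unused))` on GCC/Clang.

```cpp
// Sketch of the UNUSED-parameter pattern applied in this commit.
// Assumption: a macro equivalent to Paddle's UNUSED, expanding to
// __attribute__((unused)) on GCC/Clang and to nothing elsewhere.
#include <iostream>
#include <string>

#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// `op_name` keeps the signature uniform across call sites but is never
// read here; without the annotation, -Wall -Wextra emits
// -Wunused-parameter for it.
void LogCast(const std::string& inputs_name, std::string op_name UNUSED) {
  std::cout << "AMP cast inputs(" << inputs_name << ")\n";
}

int main() {
  LogCast("x", "relu");  // compiles cleanly with -Wall -Wextra
  return 0;
}
```

Annotating the parameter, rather than deleting its name, preserves the self-documenting signature and avoids breaking templates and overrides that must match the declaration.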
@@ -43,7 +43,7 @@ inline std::vector<paddle::Tensor> AmpAutoCasts(
     const std::string& inputs_name,
     const std::vector<paddle::Tensor>& inputs,
     const phi::DataType& dst_dtype,
-    std::string op_name) {
+    std::string op_name UNUSED) {
   VLOG(6) << "AMP AmpAutoCasts:"
           << " inputs(" << inputs_name << ") dst_dtype("
           << phi::DataTypeToString(dst_dtype) << ").";
@@ -1147,7 +1147,7 @@ struct ReluGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * (out > static_cast<T>(0)).template cast<T>();
   }
@@ -247,7 +247,7 @@ struct UnaryCompoundGradDIntermediateFunctor {
     }
   }
-  inline HOSTDEVICE T UseIntermediateOut(T x,
+  inline HOSTDEVICE T UseIntermediateOut(T x UNUSED,
                                          T intermediate_out,
                                          T out,
                                          T dout) {
@@ -112,8 +112,8 @@ struct MeanGradFunctor {
             typename DY,
             typename Dim>
   void operator()(const DeviceContext& place,
-                  X* x,
-                  Y* y,
+                  X* x UNUSED,
+                  Y* y UNUSED,
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
@@ -142,7 +142,7 @@ class FusedMatmulOneDNNHandler
   float ComputeOutputScale(float matmul_alpha,
                            const float scale_x,
                            const float scale_y,
-                           const float scale_in_eltwise,
+                           const float scale_in_eltwise UNUSED,
                            const float scale_out,
                            const bool force_fp32_output) {
     float f_scale_out = force_fp32_output ? 1.0f : scale_out;
@@ -357,7 +357,7 @@ void SliceArrayGradKernel(const Context& dev_ctx,
                           const TensorArray& input,
                           const TensorArray& out_grad,
                           const IntArray& starts,
-                          const IntArray& ends,
+                          const IntArray& ends UNUSED,
                           TensorArray* input_grad) {
   int64_t d_in_size = input.size();
   input_grad->resize(d_in_size);
@@ -22,9 +22,9 @@
 namespace phi {
 template <typename T, typename Context>
-void NPUIdentityKernel(const Context& dev_ctx,
+void NPUIdentityKernel(const Context& dev_ctx UNUSED,
                        const DenseTensor& x,
-                       const int format,
+                       const int format UNUSED,
                        DenseTensor* out) {
   VLOG(4) << "npu_identity op is only for NPU, please avoid using this kernel!";
   out->ShareDataWith(x);
@@ -20,7 +20,7 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void ValuesCooGradKernel(const Context& dev_ctx,
+void ValuesCooGradKernel(const Context& dev_ctx UNUSED,
                          const SparseCooTensor& x,
                          const DenseTensor& out_grad,
                          SparseCooTensor* x_grad) {