Unverified commit 3e1fffea, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53689)

* test,test=develop
Parent 8ed01e8d
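This commit is mechanical: each parameter that a function body never reads is annotated with the `UNUSED` macro, silencing `-Wunused-parameter` (enabled by `-Wextra`) while keeping the overridden signatures intact. Below is a minimal, self-contained sketch of the pattern; the macro definition shown is a hypothetical stand-in for Paddle's own, and `AllGatherStub` is an invented example rather than code from this commit.

```cpp
// Hypothetical stand-in for Paddle's UNUSED macro (illustration only).
// On GCC/Clang, __attribute__((unused)) marks a parameter as intentionally
// unreferenced, so -Wunused-parameter does not fire for it.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

#include <stdexcept>

// Invented stub mirroring the ones in this diff: the body only throws,
// so every parameter would otherwise trigger the warning under -Wall -Wextra.
int AllGatherStub(int* out_tensor UNUSED,
                  int64_t offset UNUSED,
                  bool sync_op UNUSED) {
  throw std::runtime_error("all_gather is not supported by this backend");
}
```

C++17 offers the standard `[[maybe_unused]]` attribute for the same purpose; a project-local macro keeps the annotation terse and expands to nothing on toolchains without the GNU attribute.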
@@ -101,12 +101,12 @@ class ProcessGroupWithStream : public ProcessGroup {
   }
 
   std::shared_ptr<ProcessGroup::Task> AllGather(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      int64_t offset,
-      int64_t numel,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      int64_t offset UNUSED,
+      int64_t numel UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support all_gather.",
         GetBackendName()));
@@ -125,11 +125,11 @@ class ProcessGroupWithStream : public ProcessGroup {
   }
 
   std::shared_ptr<ProcessGroup::Task> AllReduce(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const AllreduceOptions& opts,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const AllreduceOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support all_reduce.",
         GetBackendName()));
@@ -150,12 +150,12 @@ class ProcessGroupWithStream : public ProcessGroup {
   }
 
   std::shared_ptr<ProcessGroup::Task> AllToAll(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const std::vector<int64_t>& out_size_each_rank,
-      const std::vector<int64_t>& in_size_each_rank,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const std::vector<int64_t>& out_size_each_rank UNUSED,
+      const std::vector<int64_t>& in_size_each_rank UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support all_to_all.",
         GetBackendName()));
@@ -174,11 +174,11 @@ class ProcessGroupWithStream : public ProcessGroup {
   }
 
   std::shared_ptr<ProcessGroup::Task> Broadcast(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const BroadcastOptions& opts,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const BroadcastOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support broadcast.",
         GetBackendName()));
@@ -195,11 +195,12 @@ class ProcessGroupWithStream : public ProcessGroup {
                   /*use_calc_stream*/ false);
   }
 
-  std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
-                                             const phi::DenseTensor& in_tensor,
-                                             const ReduceOptions& opts,
-                                             bool sync_op,
-                                             bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Reduce(
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const ReduceOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support reduce.",
         GetBackendName()));
@@ -218,11 +219,11 @@ class ProcessGroupWithStream : public ProcessGroup {
   }
 
   std::shared_ptr<ProcessGroup::Task> ReduceScatter(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const ReduceScatterOptions& opts,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const ReduceScatterOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support reduce_scatter.",
         GetBackendName()));
@@ -239,11 +240,12 @@ class ProcessGroupWithStream : public ProcessGroup {
                   /*use_calc_stream*/ false);
   }
 
-  std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
-                                              const phi::DenseTensor& in_tensor,
-                                              const ScatterOptions& opts,
-                                              bool sync_op,
-                                              bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Scatter(
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const ScatterOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support scatter.",
         GetBackendName()));
@@ -284,12 +286,13 @@ class ProcessGroupWithStream : public ProcessGroup {
                 use_calc_stream);
   }
 
-  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
-                                           int src_rank,
-                                           int64_t offset,
-                                           int64_t numel,
-                                           bool sync_op,
-                                           bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor UNUSED,
+                                           int src_rank UNUSED,
+                                           int64_t offset UNUSED,
+                                           int64_t numel UNUSED,
+                                           bool sync_op UNUSED,
+                                           bool use_calc_stream
+                                               UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support recv.",
         GetBackendName()));
@@ -330,12 +333,13 @@ class ProcessGroupWithStream : public ProcessGroup {
                 use_calc_stream);
   }
 
-  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
-                                           int dst_rank,
-                                           int64_t offset,
-                                           int64_t numel,
-                                           bool sync_op,
-                                           bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Send(
+      const phi::DenseTensor& tensor UNUSED,
+      int dst_rank UNUSED,
+      int64_t offset UNUSED,
+      int64_t numel UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support send.",
         GetBackendName()));
......
@@ -535,25 +535,25 @@ class HeterService : public PsService {
   bool IsExit() { return is_exit_; }
 
  private:
-  int32_t stop_profiler(const PsRequestMessage& request,
-                        PsResponseMessage& response,  // NOLINT
-                        brpc::Controller* cntl) {
+  int32_t stop_profiler(const PsRequestMessage& request UNUSED,
+                        PsResponseMessage& response UNUSED,  // NOLINT
+                        brpc::Controller* cntl UNUSED) {
     platform::DisableProfiler(
         platform::EventSortingKey::kDefault,
         string::Sprintf("heter_worker_%s_profile", endpoint_));
     return 0;
   }
 
-  int32_t start_profiler(const PsRequestMessage& request,
-                         PsResponseMessage& response,  // NOLINT
-                         brpc::Controller* cntl) {
+  int32_t start_profiler(const PsRequestMessage& request UNUSED,
+                         PsResponseMessage& response UNUSED,  // NOLINT
+                         brpc::Controller* cntl UNUSED) {
     platform::EnableProfiler(platform::ProfilerState::kAll);
     return 0;
   }
 
   int32_t stop_heter_worker(const PsRequestMessage& request,
-                            PsResponseMessage& response,  // NOLINT
-                            brpc::Controller* cntl) {
+                            PsResponseMessage& response UNUSED,  // NOLINT
+                            brpc::Controller* cntl UNUSED) {
     auto client_id = request.client_id();
     stop_cpu_worker_set_.insert(client_id);
     if (stop_cpu_worker_set_.size() == fan_in_) {
......
@@ -65,7 +65,7 @@ inline std::shared_ptr<EagerLayoutTransformer> EagerLayoutAutotune(
     const std::string& op_name,
     const paddle::small_vector<std::vector<paddle::Tensor>,
                                kSlotSmallVectorSize>& tensors_vector,
-    T* attr) {
+    T* attr UNUSED) {
   // For lightly op like reduce
   if (!(DesiredLayout() == phi::DataLayout::UNDEFINED)) {
     VLOG(4) << "LayoutAutotune was unstarted. Current op :" << op_name;
@@ -81,7 +81,7 @@ inline std::shared_ptr<EagerLayoutTransformer> EagerLayoutAutotune(
     const paddle::small_vector<std::vector<paddle::Tensor>,
                                kSlotSmallVectorSize>& tensors_vector,
     T1* axis,
-    T2* keep_dim) {
+    T2* keep_dim UNUSED) {
   // For lightly op like argmax
   return EagerLayoutAutotune<T1>(op_name, tensors_vector, axis);
 }
......
@@ -226,7 +226,7 @@ class Metric {
                     const std::string& pred_varname,
                     const std::string& uid_varname,
                     int metric_phase,
-                    int bucket_size = 1000000) {
+                    int bucket_size UNUSED = 1000000) {
     label_varname_ = label_varname;
     pred_varname_ = pred_varname;
     uid_varname_ = uid_varname;
@@ -297,7 +297,7 @@ class Metric {
   }
   virtual ~MultiTaskMetricMsg() {}
   void add_data(const Scope* exe_scope,
-                const paddle::platform::Place& place) override {
+                const paddle::platform::Place& place UNUSED) override {
     std::vector<int64_t> cmatch_rank_data;
     get_data<int64_t>(exe_scope, cmatch_rank_varname_, &cmatch_rank_data);
     std::vector<int64_t> label_data;
@@ -378,7 +378,7 @@ class Metric {
   }
   virtual ~CmatchRankMetricMsg() {}
   void add_data(const Scope* exe_scope,
-                const paddle::platform::Place& place) override {
+                const paddle::platform::Place& place UNUSED) override {
     std::vector<int64_t> cmatch_rank_data;
     get_data<int64_t>(exe_scope, cmatch_rank_varname_, &cmatch_rank_data);
     std::vector<int64_t> label_data;
@@ -502,7 +502,7 @@ class Metric {
   }
   virtual ~CmatchRankMaskMetricMsg() {}
   void add_data(const Scope* exe_scope,
-                const paddle::platform::Place& place) override {
+                const paddle::platform::Place& place UNUSED) override {
     std::vector<int64_t> cmatch_rank_data;
     get_data<int64_t>(exe_scope, cmatch_rank_varname_, &cmatch_rank_data);
     std::vector<int64_t> label_data;
......
@@ -257,9 +257,9 @@ struct OpKernelRegistrarFunctorEx<PlaceType,
                                   true,
                                   I,
                                   DataTypeAndKernelType...> {
-  void operator()(const char* op_type,
-                  const char* library_type,
-                  int customized_type_value) const {}
+  void operator()(const char* op_type UNUSED,
+                  const char* library_type UNUSED,
+                  int customized_type_value UNUSED) const {}
 };
 
 template <typename PlaceType, size_t I, typename... DataTypeAndKernelType>
......
@@ -334,7 +334,7 @@ struct SoftReluGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     auto tmp = static_cast<T>(threshold);
     auto temp = ((out > -tmp) * (out < tmp)).template cast<T>();
     dx.device(d) = dout * (static_cast<T>(1) - (-out).exp()) * temp;
......
@@ -110,7 +110,7 @@ static std::unique_ptr<CUDAGraphWithInOuts> CaptureCUDAGraph(
     inputs.insert(inputs.end(), input_tensors.begin(), input_tensors.end());
   }
 
-  auto func = [&](const std::vector<const phi::DenseTensor *> &inputs) {
+  auto func = [&](const std::vector<const phi::DenseTensor *> &inputs UNUSED) {
     callable(ctx);
     std::vector<phi::DenseTensor *> outputs;
     for (const auto &name : output_names) {
......
@@ -25,7 +25,7 @@ namespace math {
 template <typename T>
 class BeamSearchFunctor<phi::CPUContext, T> {
  public:
-  void operator()(const phi::CPUContext &context,
+  void operator()(const phi::CPUContext &context UNUSED,
                   const phi::DenseTensor *pre_ids,
                   const phi::DenseTensor *pre_scores,
                   const phi::DenseTensor *ids,
......
@@ -58,7 +58,7 @@ class MemcpyD2HFunctor {
   }
 
   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
......
@@ -403,7 +403,8 @@ class FCMKLDNNKernel : public framework::OpKernel<T_in> {
     }));
   }
 
-  void PrepareSrcMem(const std::shared_ptr<dnnl::inner_product_forward>& fc_p,
+  void PrepareSrcMem(const std::shared_ptr<dnnl::inner_product_forward>& fc_p
+                         UNUSED,
                      const std::shared_ptr<dnnl::memory>& src_mem,
                      const phi::DenseTensor* x,
                      const dnnl::engine& engine) const {
......
@@ -411,7 +411,7 @@ class MatMulMKLDNNKernel : public paddle::framework::OpKernel<T> {
   }
 
  private:
-  void CalculateMatrixDims(const ExecutionContext &ctx,
+  void CalculateMatrixDims(const ExecutionContext &ctx UNUSED,
                            const std::vector<int64_t> &x_dims,
                            const std::vector<int64_t> &y_dims,
                            std::vector<int64_t> *x_bd_dims,
......
@@ -48,7 +48,7 @@ struct SequenceExpandGradFunctor {
 
 template <typename T>
 struct SequenceExpandFunctor<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& x,
                   const phi::Vector<size_t>& x_lod, /*expand source lod*/
                   const phi::Vector<size_t>& ref_lod, /*expand referenced lod*/
......
@@ -1115,7 +1115,7 @@ class PReluOneDNNHandler
                      const DenseTensor& x,
                      const DenseTensor& weights,
                      const std::string& mode,
-                     const std::string& data_format,
+                     const std::string& data_format UNUSED,
                      const bool is_test)
       : OneDNNHandlerNoCachingT<T, dnnl::prelu_forward, dnnl::prelu_backward>(
             engine, cpu_place) {
......
@@ -189,7 +189,7 @@ struct CUBlas<double> {
   }
 
   template <typename... ARGS>
-  static void GEMM_EX(ARGS... args) {
+  static void GEMM_EX(ARGS... args UNUSED) {
     PADDLE_THROW(
         phi::errors::Unimplemented("Currently there are not cublasDgemmEx."));
   }
......
@@ -209,8 +209,8 @@ void BilinearInterpKernel(
     int out_w,
     const std::vector<float>& scale,
     const std::string& interp_method,
-    bool align_corners,
-    int align_mode,
+    bool align_corners UNUSED,
+    int align_mode UNUSED,
     DenseTensor* output) {
   InterpolateKernel<T, Context>(ctx,
                                 x,
......
@@ -24,9 +24,9 @@ template <typename T, typename Context>
 void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
-                 const std::string& mode,
+                 const std::string& mode UNUSED,
                  float pad_value,
-                 const std::string& data_format,
+                 const std::string& data_format UNUSED,
                  DenseTensor* out) {
   PadOpKernel<T, Context>(dev_ctx, x, paddings.GetData(), pad_value, out);
 }
......
@@ -53,7 +53,7 @@ void ReshapeGradKernel<phi::XPUContext>(const XPUContext& dev_ctx,
 
 template <typename Context>
 void ReshapeDoubleGradKernel(const Context& dev_ctx,
-                             const DenseTensor& out_grad,
+                             const DenseTensor& out_grad UNUSED,
                              const DenseTensor& x_grad_grad,
                              DenseTensor* out_grad_grad) {
   ReshapeGradKernel(dev_ctx, x_grad_grad, out_grad_grad);
......
@@ -81,7 +81,7 @@ void ReshapeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const IntArray& shape,
                    DenseTensor* out,
-                   DenseTensor* xshape) {
+                   DenseTensor* xshape UNUSED) {
   ReshapeInferKernel(dev_ctx, x, shape, out);
 }
......
@@ -22,11 +22,11 @@
 namespace phi {
 
 template <typename Context>
-void ShareBufferKernel(const Context &dev_ctx,
+void ShareBufferKernel(const Context &dev_ctx UNUSED,
                        const std::vector<const DenseTensor *> &x,
-                       const std::vector<bool> &share_dims_and_dtype,
+                       const std::vector<bool> &share_dims_and_dtype UNUSED,
                        std::vector<DenseTensor *> out,
-                       std::vector<DenseTensor *> xout) {
+                       std::vector<DenseTensor *> xout UNUSED) {
   PADDLE_ENFORCE_EQ(
       x.size(),
       out.size(),
......