Unverified commit 3e1fffea, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53689)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent 8ed01e8d
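The pattern is the same throughout the diff below: every parameter that an overriding stub or fallback kernel deliberately ignores is annotated with the UNUSED macro, so builds with -Wunused-parameter enabled stay warning-free without renaming or removing the parameters. As a minimal, self-contained sketch of the mechanism (the macro definition below is the common GCC/Clang idiom and is an assumption here, not necessarily Paddle's literal definition; AllGatherStub is a hypothetical function for illustration):

```cpp
// Sketch only: UNUSED as commonly defined for GCC/Clang (assumed; Paddle's
// own macro may differ in spelling or location).
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

#include <cstdio>

// Hypothetical stub mirroring the diff's style. Compiled with
// `g++ -Wall -Wextra`, removing the UNUSED annotations reintroduces
// "unused parameter 'sync_op' [-Wunused-parameter]" warnings.
int AllGatherStub(bool sync_op UNUSED, bool use_calc_stream UNUSED) {
  std::puts("all_gather is not supported by this backend");
  return -1;
}

int main() { return AllGatherStub(true, false) == -1 ? 0 : 1; }
```

In C++17 the portable spelling is the [[maybe_unused]] attribute placed before the parameter declaration; keeping the parameter names plus an annotation preserves the documented interface, unlike the alternative of deleting the names from the signature.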
@@ -101,12 +101,12 @@ class ProcessGroupWithStream : public ProcessGroup {
   }

   std::shared_ptr<ProcessGroup::Task> AllGather(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      int64_t offset,
-      int64_t numel,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      int64_t offset UNUSED,
+      int64_t numel UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support all_gather.",
         GetBackendName()));
@@ -125,11 +125,11 @@ class ProcessGroupWithStream : public ProcessGroup {
   }

   std::shared_ptr<ProcessGroup::Task> AllReduce(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const AllreduceOptions& opts,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const AllreduceOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support all_reduce.",
         GetBackendName()));
@@ -150,12 +150,12 @@ class ProcessGroupWithStream : public ProcessGroup {
   }

   std::shared_ptr<ProcessGroup::Task> AllToAll(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const std::vector<int64_t>& out_size_each_rank,
-      const std::vector<int64_t>& in_size_each_rank,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const std::vector<int64_t>& out_size_each_rank UNUSED,
+      const std::vector<int64_t>& in_size_each_rank UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support all_to_all.",
         GetBackendName()));
@@ -174,11 +174,11 @@ class ProcessGroupWithStream : public ProcessGroup {
   }

   std::shared_ptr<ProcessGroup::Task> Broadcast(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const BroadcastOptions& opts,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const BroadcastOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support broadcast.",
         GetBackendName()));
@@ -195,11 +195,12 @@ class ProcessGroupWithStream : public ProcessGroup {
                        /*use_calc_stream*/ false);
   }

-  std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
-                                             const phi::DenseTensor& in_tensor,
-                                             const ReduceOptions& opts,
-                                             bool sync_op,
-                                             bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Reduce(
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const ReduceOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support reduce.",
         GetBackendName()));
@@ -218,11 +219,11 @@ class ProcessGroupWithStream : public ProcessGroup {
   }

   std::shared_ptr<ProcessGroup::Task> ReduceScatter(
-      phi::DenseTensor* out_tensor,
-      const phi::DenseTensor& in_tensor,
-      const ReduceScatterOptions& opts,
-      bool sync_op,
-      bool use_calc_stream) override {
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const ReduceScatterOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support reduce_scatter.",
         GetBackendName()));
@@ -239,11 +240,12 @@ class ProcessGroupWithStream : public ProcessGroup {
                        /*use_calc_stream*/ false);
   }

-  std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
-                                              const phi::DenseTensor& in_tensor,
-                                              const ScatterOptions& opts,
-                                              bool sync_op,
-                                              bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Scatter(
+      phi::DenseTensor* out_tensor UNUSED,
+      const phi::DenseTensor& in_tensor UNUSED,
+      const ScatterOptions& opts UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support scatter.",
         GetBackendName()));
@@ -284,12 +286,13 @@ class ProcessGroupWithStream : public ProcessGroup {
                     use_calc_stream);
   }

-  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
-                                           int src_rank,
-                                           int64_t offset,
-                                           int64_t numel,
-                                           bool sync_op,
-                                           bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor UNUSED,
+                                           int src_rank UNUSED,
+                                           int64_t offset UNUSED,
+                                           int64_t numel UNUSED,
+                                           bool sync_op UNUSED,
+                                           bool use_calc_stream
+                                               UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support recv.",
         GetBackendName()));
@@ -330,12 +333,13 @@ class ProcessGroupWithStream : public ProcessGroup {
                     use_calc_stream);
   }

-  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
-                                           int dst_rank,
-                                           int64_t offset,
-                                           int64_t numel,
-                                           bool sync_op,
-                                           bool use_calc_stream) override {
+  std::shared_ptr<ProcessGroup::Task> Send(
+      const phi::DenseTensor& tensor UNUSED,
+      int dst_rank UNUSED,
+      int64_t offset UNUSED,
+      int64_t numel UNUSED,
+      bool sync_op UNUSED,
+      bool use_calc_stream UNUSED) override {
     PADDLE_THROW(phi::errors::Unimplemented(
         "ProcessGroupWithStream (%s) does not support send.",
         GetBackendName()));
...
@@ -535,25 +535,25 @@ class HeterService : public PsService {
   bool IsExit() { return is_exit_; }

  private:
-  int32_t stop_profiler(const PsRequestMessage& request,
-                        PsResponseMessage& response,  // NOLINT
-                        brpc::Controller* cntl) {
+  int32_t stop_profiler(const PsRequestMessage& request UNUSED,
+                        PsResponseMessage& response UNUSED,  // NOLINT
+                        brpc::Controller* cntl UNUSED) {
     platform::DisableProfiler(
         platform::EventSortingKey::kDefault,
         string::Sprintf("heter_worker_%s_profile", endpoint_));
     return 0;
   }

-  int32_t start_profiler(const PsRequestMessage& request,
-                         PsResponseMessage& response,  // NOLINT
-                         brpc::Controller* cntl) {
+  int32_t start_profiler(const PsRequestMessage& request UNUSED,
+                         PsResponseMessage& response UNUSED,  // NOLINT
+                         brpc::Controller* cntl UNUSED) {
     platform::EnableProfiler(platform::ProfilerState::kAll);
     return 0;
   }

   int32_t stop_heter_worker(const PsRequestMessage& request,
-                            PsResponseMessage& response,  // NOLINT
-                            brpc::Controller* cntl) {
+                            PsResponseMessage& response UNUSED,  // NOLINT
+                            brpc::Controller* cntl UNUSED) {
     auto client_id = request.client_id();
     stop_cpu_worker_set_.insert(client_id);
     if (stop_cpu_worker_set_.size() == fan_in_) {
...
@@ -65,7 +65,7 @@ inline std::shared_ptr<EagerLayoutTransformer> EagerLayoutAutotune(
     const std::string& op_name,
     const paddle::small_vector<std::vector<paddle::Tensor>,
                                kSlotSmallVectorSize>& tensors_vector,
-    T* attr) {
+    T* attr UNUSED) {
   // For lightly op like reduce
   if (!(DesiredLayout() == phi::DataLayout::UNDEFINED)) {
     VLOG(4) << "LayoutAutotune was unstarted. Current op :" << op_name;
@@ -81,7 +81,7 @@ inline std::shared_ptr<EagerLayoutTransformer> EagerLayoutAutotune(
     const paddle::small_vector<std::vector<paddle::Tensor>,
                                kSlotSmallVectorSize>& tensors_vector,
     T1* axis,
-    T2* keep_dim) {
+    T2* keep_dim UNUSED) {
   // For lightly op like argmax
   return EagerLayoutAutotune<T1>(op_name, tensors_vector, axis);
 }
...
@@ -226,7 +226,7 @@ class Metric {
                  const std::string& pred_varname,
                  const std::string& uid_varname,
                  int metric_phase,
-                 int bucket_size = 1000000) {
+                 int bucket_size UNUSED = 1000000) {
     label_varname_ = label_varname;
     pred_varname_ = pred_varname;
     uid_varname_ = uid_varname;
@@ -297,7 +297,7 @@ class Metric {
   }
   virtual ~MultiTaskMetricMsg() {}
   void add_data(const Scope* exe_scope,
-                const paddle::platform::Place& place) override {
+                const paddle::platform::Place& place UNUSED) override {
     std::vector<int64_t> cmatch_rank_data;
     get_data<int64_t>(exe_scope, cmatch_rank_varname_, &cmatch_rank_data);
     std::vector<int64_t> label_data;
@@ -378,7 +378,7 @@ class Metric {
   }
   virtual ~CmatchRankMetricMsg() {}
   void add_data(const Scope* exe_scope,
-                const paddle::platform::Place& place) override {
+                const paddle::platform::Place& place UNUSED) override {
     std::vector<int64_t> cmatch_rank_data;
     get_data<int64_t>(exe_scope, cmatch_rank_varname_, &cmatch_rank_data);
     std::vector<int64_t> label_data;
@@ -502,7 +502,7 @@ class Metric {
   }
   virtual ~CmatchRankMaskMetricMsg() {}
   void add_data(const Scope* exe_scope,
-                const paddle::platform::Place& place) override {
+                const paddle::platform::Place& place UNUSED) override {
     std::vector<int64_t> cmatch_rank_data;
     get_data<int64_t>(exe_scope, cmatch_rank_varname_, &cmatch_rank_data);
     std::vector<int64_t> label_data;
...
@@ -257,9 +257,9 @@ struct OpKernelRegistrarFunctorEx<PlaceType,
                                   true,
                                   I,
                                   DataTypeAndKernelType...> {
-  void operator()(const char* op_type,
-                  const char* library_type,
-                  int customized_type_value) const {}
+  void operator()(const char* op_type UNUSED,
+                  const char* library_type UNUSED,
+                  int customized_type_value UNUSED) const {}
 };

 template <typename PlaceType, size_t I, typename... DataTypeAndKernelType>
...
@@ -334,7 +334,7 @@ struct SoftReluGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     auto tmp = static_cast<T>(threshold);
     auto temp = ((out > -tmp) * (out < tmp)).template cast<T>();
     dx.device(d) = dout * (static_cast<T>(1) - (-out).exp()) * temp;
...
@@ -110,7 +110,7 @@ static std::unique_ptr<CUDAGraphWithInOuts> CaptureCUDAGraph(
     inputs.insert(inputs.end(), input_tensors.begin(), input_tensors.end());
   }

-  auto func = [&](const std::vector<const phi::DenseTensor *> &inputs) {
+  auto func = [&](const std::vector<const phi::DenseTensor *> &inputs UNUSED) {
     callable(ctx);
     std::vector<phi::DenseTensor *> outputs;
     for (const auto &name : output_names) {
...
@@ -25,7 +25,7 @@ namespace math {
 template <typename T>
 class BeamSearchFunctor<phi::CPUContext, T> {
  public:
-  void operator()(const phi::CPUContext &context,
+  void operator()(const phi::CPUContext &context UNUSED,
                   const phi::DenseTensor *pre_ids,
                   const phi::DenseTensor *pre_scores,
                   const phi::DenseTensor *ids,
...
@@ -58,7 +58,7 @@ class MemcpyD2HFunctor {
   }

   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
...
@@ -403,7 +403,8 @@ class FCMKLDNNKernel : public framework::OpKernel<T_in> {
         }));
   }

-  void PrepareSrcMem(const std::shared_ptr<dnnl::inner_product_forward>& fc_p,
+  void PrepareSrcMem(const std::shared_ptr<dnnl::inner_product_forward>& fc_p
+                         UNUSED,
                      const std::shared_ptr<dnnl::memory>& src_mem,
                      const phi::DenseTensor* x,
                      const dnnl::engine& engine) const {
...
@@ -411,7 +411,7 @@ class MatMulMKLDNNKernel : public paddle::framework::OpKernel<T> {
   }

  private:
-  void CalculateMatrixDims(const ExecutionContext &ctx,
+  void CalculateMatrixDims(const ExecutionContext &ctx UNUSED,
                            const std::vector<int64_t> &x_dims,
                            const std::vector<int64_t> &y_dims,
                            std::vector<int64_t> *x_bd_dims,
...
@@ -48,7 +48,7 @@ struct SequenceExpandGradFunctor {

 template <typename T>
 struct SequenceExpandFunctor<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::DenseTensor& x,
                   const phi::Vector<size_t>& x_lod,   /*expand source lod*/
                   const phi::Vector<size_t>& ref_lod, /*expand referenced lod*/
...
@@ -1115,7 +1115,7 @@ class PReluOneDNNHandler
                     const DenseTensor& x,
                     const DenseTensor& weights,
                     const std::string& mode,
-                    const std::string& data_format,
+                    const std::string& data_format UNUSED,
                     const bool is_test)
       : OneDNNHandlerNoCachingT<T, dnnl::prelu_forward, dnnl::prelu_backward>(
             engine, cpu_place) {
...
@@ -189,7 +189,7 @@ struct CUBlas<double> {
   }

   template <typename... ARGS>
-  static void GEMM_EX(ARGS... args) {
+  static void GEMM_EX(ARGS... args UNUSED) {
     PADDLE_THROW(
         phi::errors::Unimplemented("Currently there are not cublasDgemmEx."));
   }
...
@@ -209,8 +209,8 @@ void BilinearInterpKernel(
     int out_w,
     const std::vector<float>& scale,
     const std::string& interp_method,
-    bool align_corners,
-    int align_mode,
+    bool align_corners UNUSED,
+    int align_mode UNUSED,
     DenseTensor* output) {
   InterpolateKernel<T, Context>(ctx,
                                 x,
...
@@ -24,9 +24,9 @@ template <typename T, typename Context>
 void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
-                 const std::string& mode,
+                 const std::string& mode UNUSED,
                  float pad_value,
-                 const std::string& data_format,
+                 const std::string& data_format UNUSED,
                  DenseTensor* out) {
   PadOpKernel<T, Context>(dev_ctx, x, paddings.GetData(), pad_value, out);
 }
...
@@ -53,7 +53,7 @@ void ReshapeGradKernel<phi::XPUContext>(const XPUContext& dev_ctx,

 template <typename Context>
 void ReshapeDoubleGradKernel(const Context& dev_ctx,
-                             const DenseTensor& out_grad,
+                             const DenseTensor& out_grad UNUSED,
                              const DenseTensor& x_grad_grad,
                              DenseTensor* out_grad_grad) {
   ReshapeGradKernel(dev_ctx, x_grad_grad, out_grad_grad);
...
@@ -81,7 +81,7 @@ void ReshapeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const IntArray& shape,
                    DenseTensor* out,
-                   DenseTensor* xshape) {
+                   DenseTensor* xshape UNUSED) {
   ReshapeInferKernel(dev_ctx, x, shape, out);
 }
...
@@ -22,11 +22,11 @@

 namespace phi {

 template <typename Context>
-void ShareBufferKernel(const Context &dev_ctx,
+void ShareBufferKernel(const Context &dev_ctx UNUSED,
                        const std::vector<const DenseTensor *> &x,
-                       const std::vector<bool> &share_dims_and_dtype,
+                       const std::vector<bool> &share_dims_and_dtype UNUSED,
                        std::vector<DenseTensor *> out,
-                       std::vector<DenseTensor *> xout) {
+                       std::vector<DenseTensor *> xout UNUSED) {
   PADDLE_ENFORCE_EQ(
       x.size(),
       out.size(),
...