Unverified · Commit 10758725 authored by Galaxy1458, committed by GitHub

test,test=develop (#53811)

Parent c1f4005a
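The change itself is mechanical: parameters that these CPU stub kernels and helper functors never read are annotated with Paddle's `UNUSED` macro so that builds with `-Wunused-parameter` stay warning-free. As a minimal sketch only (the macro's real definition lives in Paddle's own headers; the pattern below is the common GCC/Clang idiom, and `ComputeStub` is a hypothetical stand-in for the stubs in this diff):

// Assumption: UNUSED expands to the GCC/Clang "unused" attribute and to
// nothing on toolchains that lack it, following the common idiom.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

#include <stdexcept>

// Hypothetical stub in the style of the CPU kernels below: the body only
// throws, so both parameters are deliberately unused and annotated.
void ComputeStub(int nranks UNUSED, const float *x UNUSED) {
  throw std::runtime_error("Do not support this op for cpu kernel now.");
}

Compiled with `g++ -Wall -Wextra -c`, the annotated stub produces no unused-parameter warnings; without the attribute it would warn on both `nranks` and `x`.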
@@ -32,7 +32,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class AllToAllOpCPUKernel : public framework::OpKernel<T> {
public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support alltoall for cpu kernel now."));
}
......
@@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class CIdentityOpCPUKernel : public framework::OpKernel<T> {
public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support c_identity for cpu kernel now."));
}
......
@@ -30,7 +30,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class CReduceScatterOpCPUKernel : public framework::OpKernel<T> {
public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unimplemented(
"Unimplemented cpu kernel for CReduceScatterOp."));
}
......
@@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class CSplitOpCPUKernel : public framework::OpKernel<T> {
public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support c_split for cpu kernel now."));
}
......
@@ -22,7 +22,7 @@ namespace phi {
template <typename T>
void index_put_kernel(const int64_t N,
-                      const T* x,
+                      const T* x UNUSED,
const T* vals,
const int64_t** indices,
const phi::DDim& stride,
......
@@ -20,10 +20,10 @@
namespace phi {
template <typename T, typename Context>
-void ReduceScatterKernel(const Context& dev_ctx,
-                         const DenseTensor& x,
-                         int nranks,
-                         DenseTensor* out) {
+void ReduceScatterKernel(const Context& dev_ctx UNUSED,
+                         const DenseTensor& x UNUSED,
+                         int nranks UNUSED,
+                         DenseTensor* out UNUSED) {
PADDLE_THROW(
errors::Unimplemented("Unimplemented cpu kernel for CReduceScatterOp."));
}
......
@@ -275,7 +275,7 @@ void RnnFunc(const Context& dev_ctx,
DenseTensor* dropout_mask,
int num_layers,
int gate_num,
-             int input_size,
+             int input_size UNUSED,
int hidden_size,
bool is_bidirec,
const std::string& cell_type,
......
@@ -398,12 +398,12 @@ struct LambBetaPowUpdateFunctor {
template <typename MT>
struct LambBetaPowUpdateFunctor<MT, /*NeedUpdateBetaPow=*/false> {
-  void SetBetaPows(const MT* beta1pow,
-                   const MT* beta2pow,
-                   MT* beta1pow_out,
-                   MT* beta2pow_out,
-                   MT beta1,
-                   MT beta2) {}
+  void SetBetaPows(const MT* beta1pow UNUSED,
+                   const MT* beta2pow UNUSED,
+                   MT* beta1pow_out UNUSED,
+                   MT* beta2pow_out UNUSED,
+                   MT beta1 UNUSED,
+                   MT beta2 UNUSED) {}
HOSTDEVICE void UpdateBetaPow(size_t) const {}
};
......
@@ -70,7 +70,9 @@ class RowwiseMean2D<phi::GPUContext, T> {
template <typename T>
class RowwiseMean2D<phi::CPUContext, T> {
public:
-  RowwiseMean2D(int left, int right, const DeviceContext& dev_ctx) {}
+  RowwiseMean2D(int left UNUSED,
+                int right UNUSED,
+                const DeviceContext& dev_ctx UNUSED) {}
void operator()(const phi::CPUContext& context,
const DenseTensor& input,
......
@@ -204,7 +204,7 @@ struct TensorSetConstantWithPlace
: context_(context), tensor_(tensor), value_(value) {}
template <typename Place>
-  void operator()(Place place) const {
+  void operator()(Place place UNUSED) const {
set_constant_with_place<Place>(context_, tensor_, value_);
}
......
@@ -73,7 +73,7 @@ class MaxPoolGrad {
public:
static constexpr bool use_x = true;
HOSTDEVICE inline void compute(
-      const T& x, const T& y, const T& dy, T scale, T* dx) {
+      const T& x, const T& y, const T& dy, T scale UNUSED, T* dx) {
*dx += dy * static_cast<T>(x == y);
}
};
......
@@ -190,7 +190,7 @@ template struct SelectedRowsAddTensor<phi::CPUContext, double>;
template <typename T>
struct SelectedRowsAddTo<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
const phi::SelectedRows& input1,
const int64_t input2_offset,
phi::SelectedRows* input2) {
......
@@ -97,7 +97,7 @@ static void fast_mem_init(void* dest,
template <typename T>
class PaddingLoDTensorFunctor<phi::CPUContext, T> {
public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
const phi::DenseTensor& seq_tensor,
phi::DenseTensor* pad_tensor,
const phi::DenseTensor& pad_value,
@@ -157,7 +157,7 @@ class PaddingLoDTensorFunctor<phi::CPUContext, T> {
template <typename T>
class UnpaddingLoDTensorFunctor<phi::CPUContext, T> {
public:
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
const phi::DenseTensor& pad_tensor,
phi::DenseTensor* seq_tensor,
int pad_seq_len = -1,
......
@@ -51,9 +51,9 @@ inline static size_t TotalSequenceLength(
inline static void CheckDims(const phi::DDim& seq_tensor_dims,
const phi::DDim& pad_tensor_dims,
const phi::Vector<size_t>& seq_offset,
-                             int64_t padded_seq_len,
-                             int64_t step_width,
-                             const PadLayout& layout) {
+                             int64_t padded_seq_len UNUSED,
+                             int64_t step_width UNUSED,
+                             const PadLayout& layout UNUSED) {
PADDLE_ENFORCE_EQ(
static_cast<size_t>(seq_tensor_dims[0]),
seq_offset.back(),
......
@@ -31,7 +31,7 @@ static const std::vector<T> &ToVector(const std::vector<T> &vec) {
}
template <typename T>
-static std::vector<T> ToVector(const T *x, size_t n, const phi::Place &place) {
+static std::vector<T> ToVector(const T *x,
+                               size_t n,
+                               const phi::Place &place UNUSED) {
#ifdef __NVCC__
if (place.GetType() == phi::AllocationType::GPU) {
using CopyT = typename std::
......
@@ -370,7 +370,7 @@ void FusedMatmulKernel(const Context &dev_ctx,
const std::vector<int> &fused_transpose_Y,
const std::vector<int> &fused_reshape_Out,
const std::vector<int> &fused_transpose_Out,
-                       const std::string &mkldnn_data_type,
+                       const std::string &mkldnn_data_type UNUSED,
const float scale_x,
const float scale_y,
const float scale_in_eltwise,
......
@@ -277,7 +277,7 @@ bool IsContinuous(const Type &weight_list) {
}
template <typename T>
-void WeightToTensor(const Place &place,
+void WeightToTensor(const Place &place UNUSED,
gpuStream_t stream,
const std::vector<const DenseTensor *> &weight_list,
DenseTensor *weight) {
......
@@ -44,7 +44,7 @@ void TransposeGradKernel(const Context& dev_ctx,
template <typename T, typename Context>
void TransLayoutGradKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
const DenseTensor& out_grad,
const std::vector<int>& axis,
DenseTensor* x_grad) {
......
@@ -112,7 +112,7 @@ void SumCsrGradKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
const SparseCsrTensor& dout,
const IntArray& axis,
-                      bool keep_dim,
+                      bool keep_dim UNUSED,
SparseCsrTensor* dx) {
EmptyLikeCsrKernel<T, Context>(dev_ctx, x, dx);
unsigned int n_dim = axis.size();
......